Columns:
repo_name: string (length 6-130)
hexsha: list
file_path: list
code: list
apis: list
possible_versions: list
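Each record below follows this schema: a repository name plus parallel lists of commit hashes, file paths, raw source strings, the APIs detected in each file, and the library versions those APIs appear compatible with. The sketch below shows one way such records might be loaded and inspected in Python; the `load_records` helper and the `records.jsonl` file name are illustrative assumptions, not part of the dump itself.

```python
import json
from typing import Any, Dict, List


def load_records(path: str = "records.jsonl") -> List[Dict[str, Any]]:
    """Read one JSON object per line; each object follows the schema above.
    The file name is hypothetical -- point it at wherever the dump is stored."""
    records = []
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            records.append(json.loads(line))
    return records


def summarize(record: Dict[str, Any]) -> None:
    """Print, per file, the detected APIs and any non-empty version constraints."""
    for path, apis, versions in zip(record["file_path"],
                                    record["apis"],
                                    record["possible_versions"]):
        constrained = {lib: v for lib, v in versions.items() if v}
        print(f"{record['repo_name']}:{path}")
        print(f"  apis: {', '.join(apis)}")
        print(f"  version constraints: {constrained or 'none detected'}")
```

An empty list under a library name in possible_versions (as in most records below) simply means no version constraint was inferred for that library, not that the library is unused.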
KiDS-WL/Cat_to_Obs_K1000_P1
[ "0de7f79cab150416859ffe58ac2d0f5659aedb5d", "0de7f79cab150416859ffe58ac2d0f5659aedb5d", "0de7f79cab150416859ffe58ac2d0f5659aedb5d" ]
[ "GGL_LensCats/ldac.py", "Shear_Ratio_Test/cosmology.py", "data/kids/multiplicative_bias/read_m_values.py" ]
[ "###############\r\n# @file ldac.py\r\n# @author Douglas Applegate & Thomas Erben\r\n# @date 27/11/2013\r\n#\r\n# @brief Utilities to make accessing LDAC cats easier within Python\r\n###############\r\n\r\n# HISTORY INFORMATION:\r\n# ====================\r\n#\r\n# 01.09.2010:\r\n# I included treatment of slices through vectorkeys in\r\n# LDACCat.__getitem__\r\n#\r\n# 09.09.2010:\r\n# I made the module more robust against the non-existence\r\n# of necessary libraries\r\n#\r\n# 28.09.2010:\r\n# Bug fix: The from ... import statement must appear on the\r\n# top of the file.\r\n#\r\n# 20.11.2013:\r\n# significant extensions to the code; first proper implementations\r\n# of LDAC tables and LDAC catalogue objects.\r\n#\r\n# 25.11.2013:\r\n# - In the LDACCat class the header of the original catalogue is\r\n# preserved when storing the catalogue to a file.\r\n# - I add a method to add HISTORY keywords to the image header of\r\n# the catalogue\r\n#\r\n# 27.11.2013:\r\n# In the LDACTable I replaced the test for an empty LDAC table. Now\r\n# it is done by testing the data element for 'None'. Before it was\r\n# done by the 'size' method on the HDU. It seems that this size element\r\n# of HDU changed from a method to a simple 'int' in different pyfits\r\n# versions. Hence it is useless for code that should be compatible\r\n# to different versions of pyfits.\r\n#\r\n# 28.02.2014:\r\n# In the LDACTable 'setitem' method we do no longer create a new table\r\n# immediatly after a new column was added. This new table creation\r\n# takes long for big catalogues and it leads to necessary operations\r\n# if we add many columns without doing any operations in between. We\r\n# therefore only update column definitions and raise a flag that this\r\n# table needs an 'update'. The update then is done whenever necessary.\r\n#\r\n# 28.03.2014:\r\n# - I corrected function '__setitem__' in LDACCat. The previous\r\n# implementation was untested and did not work at all!\r\n# - I transfered Dominiks implementation of of the 'add' function in the\r\n# LDACTable. It enables concatenation of two tables with the same\r\n# keys and key types.\r\n#\r\n# 24.07.2015:\r\n# - The pyfits module was replaced by the equivalent 'astrpy.io.fits'.\r\n# pyfits will not be supported anymore at some point in the future.\r\n# - We substituted the depreceated astropy function 'new_table' with\r\n# the new BinTableHDU.from_columns. The former will no longer\r\n# be supported in a future version of astropy.\r\n# - We substituted constructs such as 'self.hdu.data == None' to\r\n# 'self.hdu.data is None' to avoid a 'FutureWarning: comparison to\r\n# `None` will result in an elementwise object comparison in the future.'\r\n# warning meesage. Although the expected future change of the '=='\r\n# operator would have no effect in the special cases the warning message\r\n# is confusing!\r\n# - We do no longer provide detailed error messages if the numpy or astropy\r\n# (former pyfits) modules are missing. 
They are quite standard and\r\n# sufficiently known by now.\r\n#\r\n# 16.08.2015:\r\n# - Bug fix: The LDACcat saveas function did not check whether tables\r\n# in the catalogue not to be updated before writing the whole catalogue\r\n# to a file - fixed!\r\n# - In the LDACTable class I had to change the name of the private\r\n# function '__update()' to '_update()' to be able to use it within\r\n# the LDACCat class.\r\n#\r\n# 09.11.2015:\r\n# I implemented the __delitem__ method in the LDACTable class\r\n#\r\n# 28.07.2016:\r\n# I converted the script to python3.\r\n#\r\n# 28.06.2017:\r\n# The __add__ function was moved from the LDACCat to the LDACTable object.\r\n# I never used this function and it was added by Dominik. I wrongly\r\n# transfered his code to the wrong class at the time.\r\n#\r\n# 18.05.2018:\r\n# substituted clobber argument with overwrite (astropy deprecation\r\n# warning)\r\n#\r\n# 18.06.2018:\r\n# I modified the add_history function to add a date/time stamp when the history\r\n# was added.\r\n#\r\n# 03.09.2018:\r\n# - I added handling of comments from keys:\r\n# There currently is not standardised way to handle comments for table keys\r\n# and astropy deleted those from non-standard conventions if new tables are\r\n# craeted. We now 'manually' create headers with comments in the TCOMM\r\n# notation and as comments from the TTYPE keyword.\r\n# - I added primitive treatment of units (you can set and retrieve them for\r\n# keys but not yet do calculations with them)\r\n#\r\n# 05.09.2018:\r\n# Bug fix: The change from 03.09. introduced a bug which did not allow\r\n# creation of new tables.\r\n\r\n\"\"\"\r\nWrapper module to work with LDAC catalogues and tables\r\n\"\"\"\r\n\r\n\r\n\r\n# standard-library includes:\r\nimport sys\r\nimport datetime as dt\r\nimport astropy.io.fits as aif\r\nimport numpy as np\r\n\r\nclass LDACCat(object):\r\n \"\"\"\r\n Class to represent an LDAC catalogue\r\n \"\"\"\r\n\r\n def __init__(self, cat=None):\r\n \"\"\"\r\n An LDAC catalogue can be instantiated either as an empty catalogue\r\n or with an existing catalogue on disk.\r\n\r\n >>> a = ldac.LDACCat('mag.cat') # reads the catalogue 'mag.cat' into\r\n # the variable 'a'.\r\n \"\"\"\r\n\r\n # The LDACCcat object contains a list of LDAC tables. We\r\n # internally also keep the header of the PrimaryHDU. It is\r\n # reused when the catalogue is saved to a file.\r\n\r\n # for an empty catalogue this list is empty:\r\n self.ldactables = []\r\n self.header = None\r\n\r\n if cat != None:\r\n # read tables from a catalogue on disk:\r\n if type(cat) == type(\"a\"):\r\n hdulist = aif.open(cat)\r\n\r\n for hdu in hdulist:\r\n if isinstance(hdu, aif.PrimaryHDU) == True:\r\n self.header = hdu.header\r\n if isinstance(hdu, aif.BinTableHDU) == True:\r\n self.ldactables.append(LDACTable(hdu))\r\n\r\n def __len__(self):\r\n \"\"\"\r\n return the number of LDAC tables in this catalogue\r\n\r\n >>> b = len(a) # number of LDAC tables in catalogue 'a'.\r\n \"\"\"\r\n\r\n return len(self.ldactables)\r\n\r\n def __getitem__(self, tablename):\r\n \"\"\"\r\n returns the named LDAC table. 
Returns 'None' if the table does\r\n not exist.\r\n\r\n Example:\r\n >>> b = a['OBJECTS'] # returns in 'b' the LDAC table with name\r\n # 'OBJECTS' from catalogue 'a'\r\n \"\"\"\r\n\r\n result = None\r\n for table in self.ldactables:\r\n if table.hdu.name == tablename:\r\n result = table\r\n\r\n return result\r\n\r\n def __setitem__(self, name, table):\r\n \"\"\"\r\n adds or replaces an LDAC table in this catalogue\r\n\r\n >>> a['NEW_TABLE'] = b['OBJECTS'] # adds the new table 'NEW_TABLE' in\r\n # 'a' from table 'OBJECTS' in 'b'.\r\n \"\"\"\r\n\r\n if isinstance(table, LDACTable):\r\n # check whether a table with name exists already:\r\n exists = False\r\n\r\n for i in range(len(self.ldactables)):\r\n if self.ldactables[i].hdu.name == name:\r\n self.ldactables[i] = table\r\n exists = True\r\n\r\n if exists == False:\r\n table.setname(name)\r\n self.ldactables.append(table)\r\n\r\n def tables(self):\r\n \"\"\"\r\n returns the names of the contained LDAC tables\r\n\r\n >>> c = a.tables() # gives a list of table names in catalogue 'a'\r\n \"\"\"\r\n tablenames = []\r\n\r\n for table in self.ldactables:\r\n tablenames.append(table.hdu.name)\r\n\r\n return tablenames\r\n\r\n def __iter__(self):\r\n return self.ldactables.__iter__()\r\n\r\n def __contains__(self, tablename):\r\n \"\"\"\r\n check whether a table with name 'tablename' is present\r\n in this catalogue\r\n \"\"\"\r\n\r\n return tablename in self.tables()\r\n\r\n def has_table(self, tablename):\r\n \"\"\"\r\n check whether a table with name 'tablename' is present\r\n in this catalogue\r\n\r\n >>> c = a.has_table('OBJECTS') # returns 'True' if a table named\r\n # 'OBJECTS' is in catalogue 'a'\r\n \"\"\"\r\n\r\n return self.__contains__(tablename)\r\n\r\n def add_history(self, history):\r\n \"\"\"\r\n add a history keyword to the header of the catalogue.\r\n In addition to the history string itself, a date/time\r\n stamp is added.\r\n\r\n >>> a.add_history('Catalogue created')\r\n \"\"\"\r\n\r\n # create an empty header if necessary\r\n if self.header is None:\r\n self.header = aif.Header()\r\n\r\n curr_date = dt.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\r\n\r\n # just delegate the work to an astropy method:\r\n self.header.add_history('') # empty line for separation from other\r\n # comment/history lines\r\n entry = \"history entry added at: %s\" % (curr_date)\r\n self.header.add_history(entry)\r\n self.header.add_history(history)\r\n\r\n def saveas(self, filename, checksum=False, overwrite=False):\r\n \"\"\"\r\n save the LDAC catalogue to a file.\r\n\r\n if overwrite=True an existing file is overwritten.\r\n\r\n >>> a.saveas('test.cat') # saves LDAC catalogue 'a' with all its\r\n # tables to file 'test.cat'\r\n \"\"\"\r\n\r\n primaryHDU = aif.PrimaryHDU(header=self.header)\r\n hdulist = aif.HDUList([primaryHDU])\r\n\r\n for table in self.ldactables:\r\n if table.update == 1:\r\n table._update()\r\n\r\n hdulist.append(table.hdu)\r\n\r\n hdulist.writeto(filename, checksum=checksum, overwrite=overwrite)\r\n\r\n\r\nclass LDACTable(object):\r\n \"\"\"\r\n Class to represent an LDAC table\r\n \"\"\"\r\n\r\n def __init__(self, hdu=None):\r\n \"\"\"\r\n An LDAC table can be instantiated either as am empty table\r\n or with an astropy BinaryTable HDU (existing table).\r\n \"\"\"\r\n\r\n # dictionaries to keep comments and units\r\n self._key_comments = {}\r\n self._key_units = {}\r\n self._key_ucd = {}\r\n\r\n if hdu is None:\r\n self.hdu = aif.BinTableHDU()\r\n self.hdu.data = None\r\n\r\n # We make sure that the table has 'some' proper 
name:\r\n self.hdu.name = \"OBJECTS\"\r\n else:\r\n self.hdu = hdu\r\n\r\n # collect comments and units from all the table keys:\r\n for i in range(len(hdu.columns.names)):\r\n name = hdu.header.get('TTYPE%d' % (i + 1))\r\n\r\n # first try comment from TCOMM and then comment from TTYPE:\r\n try:\r\n curr_comm = hdu.header['TCOMM%d' % (i + 1)]\r\n except:\r\n curr_comm = hdu.header.comments('TTYPE%d' % (i + 1))\r\n\r\n self._key_comments[name] = curr_comm\r\n\r\n # retrieve unit if available\r\n try:\r\n self._key_units[name] = hdu.header['TUNIT%d' % (i + 1)]\r\n except:\r\n pass\r\n\r\n self.update = 0 # does the table need an update (e.g. when\r\n # new columns were added?)\r\n\r\n def __len__(self):\r\n \"\"\"\r\n return the number of table entries (objects)\r\n \"\"\"\r\n\r\n if self.update == 1:\r\n self._update()\r\n\r\n # 'self.hdu.data' leads to an exception for an empty catalogue.\r\n # Hence we check for this first:\r\n if self.hdu.size == 0:\r\n return 0\r\n else:\r\n return len(self.hdu.data)\r\n\r\n def __getitem__(self, key):\r\n \"\"\"\r\n returns the contents of an existing LDAC key as numpy array\r\n\r\n Example:\r\n >>> b = a['Xpos'] # store in 'b' the contents (numpy array)\r\n # of key 'Xpos' from table 'a'.\r\n \"\"\"\r\n\r\n if self.update == 1:\r\n self._update()\r\n\r\n if type(key) == type(5) or \\\r\n type(key) == type(slice(5)):\r\n return self.hdu.data[key]\r\n\r\n if type(key) == type(\"a\"):\r\n # we need to deal with slices through vector keys\r\n # such as 'MAG_APER(2)'\r\n startind = key.find(\"(\")\r\n endind = key.find(\")\")\r\n\r\n if startind > 0 and endind > 0:\r\n keyname = key[:startind]\r\n keyindex = int(key[startind + 1:endind]) - 1\r\n\r\n try:\r\n return self.hdu.data.field(keyname)[:, keyindex]\r\n except AttributeError:\r\n raise KeyError(key)\r\n else:\r\n try:\r\n return self.hdu.data.field(key)\r\n except AttributeError:\r\n raise KeyError(key)\r\n\r\n raise TypeError\r\n\r\n def __setitem__(self, key, val):\r\n \"\"\"\r\n set values of an LDAC table\r\n\r\n a['Xpos'] = b # sets the key 'Xpos' in the table 'a' to the\r\n # values in numpy array 'b'. 
If the key does\r\n # not yet exist, it is created.\r\n \"\"\"\r\n # VERY uncomplete implementation for the moment!\r\n # - we only treat scalars for the moment!\r\n # - we do not check whether the key types match\r\n # when an existing key is overwritten\r\n\r\n # sanity checks: the column name must be a string and\r\n # the value arrays length must match the table data\r\n # dimension:\r\n if type(key) == type(\"a\"):\r\n # The first condition applies to an empty table:\r\n if self.hdu.data is None or len(val) == self.hdu.data.size:\r\n # If necessary add a new column to the table\r\n if self.__contains__(key) == True:\r\n # quite some things might go wrong here\r\n # (same data type, etc.)\r\n self.hdu.data.field(key)[:] = val\r\n else:\r\n # determine format for the new column:\r\n colformat = \"\"\r\n if np.issubdtype(val.dtype, float) == True:\r\n colformat = \"1E\"\r\n\r\n if np.issubdtype(val.dtype, np.float64) == True:\r\n colformat = \"1D\"\r\n\r\n if np.issubdtype(val.dtype, int) == True:\r\n colformat = \"1I\"\r\n\r\n if np.issubdtype(val.dtype, np.string_) == True or \\\r\n np.issubdtype(val.dtype, np.unicode_):\r\n string_length = val.itemsize\r\n colformat = \"%dA\" % (string_length)\r\n\r\n # now create the new column and create a 'new' table\r\n # with the old plus the new column (I did not find a\r\n # way to just append a new column to an existing\r\n # table!):\r\n newcolumn = aif.Column(name=key, format=colformat,\r\n array=val)\r\n\r\n # If you want the new columns appended at the top of the file (e.g an ID) then do it like this\r\n #self.hdu.columns = aif.ColDefs([newcolumn]) + self.hdu.columns\r\n # If you want the new columns appended at the bottom of the file then do it like this\r\n self.hdu.columns += aif.ColDefs([newcolumn]) \r\n self._key_comments[key] = \"\"\r\n self.update = 1\r\n\r\n #raise NotImplementedError\r\n\r\n def __delitem__(self, key):\r\n if self.__contains__(key):\r\n self.hdu.columns.del_col(key)\r\n del self._key_comments[key]\r\n del self._key_units[key]\r\n self.update = 1\r\n\r\n\r\n def __add__(self, b):\r\n \"\"\"\r\n Appends table b to table a and returns a new LDAC table.\r\n Tables 'a' and 'b' must be identical keys and key types.\r\n\r\n >>> c = a + b # appends table b to a and saves it\r\n # as a LDAC table again\r\n \"\"\"\r\n # First check if both tables have the same number of\r\n # columns:\r\n if len(list(self.keys())) != len(list(b.keys())):\r\n print(\"Tables do not have the same number of columns / keywords!\")\r\n print(\"First table has \" + str(len(list(self.keys()))) + \\\r\n \" colums / keywords.\")\r\n print(\"Second table has \" + str(len(list(b.keys()))) + \\\r\n \" colums / keywords.\")\r\n return None\r\n\r\n # Now let's check if all keywords from the first table are also\r\n # present in the second table and also at the same place!\r\n for i in range(len(list(self.keys()))):\r\n if (list(self.keys())[i] in b) == False:\r\n print(\"Key \" + str(list(self.keys())[i]) + \\\r\n \" is not present in the second table!\")\r\n return None\r\n\r\n selfrows = self.hdu.data.shape[0]\r\n brows = b.hdu.data.shape[0]\r\n nrows = selfrows + brows\r\n hdu = aif.BinTableHDU.from_columns(self.hdu.columns, nrows=nrows)\r\n hdu = self-__correct_header(hdu)\r\n\r\n for i in list(self.keys()):\r\n hdu.data.field(i)[:selfrows] = self.hdu.data.field(i)\r\n hdu.data.field(i)[selfrows:] = b.hdu.data.field(i)\r\n\r\n hdu.header = self.hdu.header\r\n hdu.header['NAXIS2'] = nrows\r\n hdu.columns = self.hdu.columns\r\n hdu.name = 
self.hdu.name\r\n\r\n return LDACTable(hdu)\r\n\r\n def __correct_header(self, hdu):\r\n \"\"\"\r\n 'corrects' the header for a newly created binary table.\r\n key comments are not standardized and hence astropy deleted\r\n 'non-standardized' comments. We add here comments with the\r\n TCOMM convention as as comments of the TTYPE keyword.\r\n\r\n input:\r\n - hdu: the HDU of a newly created binary FITS table from\r\n BinTableHDU\r\n return:\r\n The original HDU with the 'corrected' header.\r\n \"\"\"\r\n\r\n for i in range(len(hdu.columns.names)):\r\n name = hdu.header.get('TTYPE%d' % (i + 1))\r\n\r\n if name in self._key_comments and \\\r\n len(self._key_comments[name]) > 0:\r\n hdu.header.comments['TTYPE%d' % (i + 1)] = \\\r\n self._key_comments[name]\r\n hdu.header.set('TCOMM%d' % (i + 1), self._key_comments[name],\r\n after='TTYPE%d' % (i + 1))\r\n\r\n if name in self._key_units and \\\r\n len(self._key_units[name]) > 0:\r\n hdu.header.set('TUNIT%d' % (i + 1), self._key_units[name],\r\n after='TTYPE%d' % (i + 1))\r\n \r\n if name in self._key_ucd and \\\r\n len(self._key_ucd[name]) > 0:\r\n hdu.header.set('TUCD%d' % (i + 1), self._key_ucd[name],\r\n after='TTYPE%d' % (i + 1))\r\n\r\n return hdu\r\n\r\n def _update(self):\r\n # update the table if necessary:\r\n newtabhdu = aif.BinTableHDU.from_columns(self.hdu.columns)\r\n newtabhdu = self.__correct_header(newtabhdu)\r\n newtabhdu.name = self.hdu.name\r\n self.hdu = newtabhdu\r\n self.update = 0\r\n\r\n def get_comment(self, key):\r\n \"\"\"\r\n return the comment of a key\r\n\r\n >>> tab.get_comment('x') # returns comment from key 'x' in table\r\n # tab as a string\r\n \"\"\"\r\n\r\n if key in self._key_comments[key]:\r\n return self._key_comments[key]\r\n else:\r\n return \"\"\r\n\r\n def set_comment(self, key, comm):\r\n \"\"\"\r\n set comment of a key\r\n\r\n >>> tab.set_comment('x', 'x position')\r\n\r\n \"\"\"\r\n\r\n self._key_comments[key] = comm\r\n\r\n def set_unit(self, key, unit):\r\n \"\"\"\r\n set the unit of a key\r\n\r\n >>> tab.set_unit('x', 'pix')\r\n \"\"\"\r\n\r\n self._key_units[key] = unit\r\n \r\n def set_ucd(self, key, ucd):\r\n \"\"\"\r\n set the UCD of a key\r\n\r\n >>> tab.set_UCD('x', 'meta.code')\r\n \"\"\"\r\n\r\n self._key_ucd[key] = ucd\r\n\r\n\r\n def keys(self):\r\n \"\"\"\r\n returns the names of the keys contained in this table\r\n\r\n >>> b = a.keys() # store a list of keynames of table 'a' in\r\n # 'b'.\r\n \"\"\"\r\n\r\n if self.update == 1:\r\n self._update()\r\n\r\n return self.hdu.columns.names\r\n\r\n def __iter__(self):\r\n if self.update == 1:\r\n self._update()\r\n\r\n return self.hdu.data.__iter__()\r\n\r\n def __contains__(self, item):\r\n if self.update == 1:\r\n self._update()\r\n\r\n return item in list(self.keys())\r\n\r\n def has_key(self, key):\r\n \"\"\"\r\n tests whether the table contains a certain key.\r\n\r\n >>> b = a.haskey('Xpos') # returns 'True' if table 'a' contains\r\n # a key with name 'Xpos'.\r\n \"\"\"\r\n\r\n return self.__contains__(key)\r\n\r\n def filter(self, mask):\r\n if self.update == 1:\r\n self._update()\r\n\r\n newtable = aif.BinTableHDU(data=self.hdu.data[mask],\r\n header=self.hdu.header)\r\n newtable = self.__correct_header(newtable)\r\n\r\n return LDACTable(newtable)\r\n\r\n def setname(self, name):\r\n \"\"\"\r\n set/change the name of the LDAC table.\r\n\r\n >>> a.setname('TESTTABLE') # set/change the name of the LDAC table\r\n # in 'a' to 'TESTTABLE'.\r\n \"\"\"\r\n\r\n self.hdu.name = name\r\n\r\n def saveas(self, filename, checksum=False, 
overwrite=False):\r\n \"\"\"\r\n save the LDAC table as a catalogue. The produced\r\n catalogue will only consist of this table!\r\n\r\n overwrite=True overwrites an existing file with the\r\n new catalogue\r\n\r\n >>> a.saveas('table.cat') # saves the LDAC table in 'a'\r\n # to file 'table.cat'\r\n \"\"\"\r\n\r\n if self.update == 1:\r\n self._update()\r\n\r\n self.hdu.writeto(filename, checksum=checksum, overwrite=overwrite)\r\n\r\n\r\ndef openObjects(hdulist, table='OBJECTS'):\r\n tablehdu = None\r\n for hdu in hdulist:\r\n # In a regular LDAC catalogue the primary header\r\n # does not have an EXTNAME keyword and 'hdu.header['EXTNAME']'\r\n # leads to a KeyError exception which we just ignore:\r\n try:\r\n if table == hdu.header['EXTNAME']:\r\n tablehdu = hdu\r\n except KeyError:\r\n pass\r\n\r\n if tablehdu is None:\r\n print(\"Table %s not present in catalogue %s\" % (table,\r\n hdulist.filename()))\r\n print(\"Creating an empty LDAC table\")\r\n\r\n return LDACTable(tablehdu)\r\n\r\ndef openObjectFile(filename, table='OBJECTS'):\r\n hdulist = aif.open(filename)\r\n if hdulist is None:\r\n return None\r\n\r\n return openObjects(hdulist, table)\r\n", "#!/usr/bin/env python\n# From Hendrik\nimport math, string, sys, os\n\nimport scipy\nimport scipy.integrate\n\ndef norm(k_vec): # the norm of a 3d vector\n return math.sqrt(k_vec[0]**2+k_vec[1]**2+k_vec[2]**2)\n\ndef W_k(k_vec): # the Fourier transform of the survey volume\n a=k_vec[0]*l[0]/2\n b=k_vec[1]*l[1]/2\n c=k_vec[2]**2*l[2]**2/2\n return exp(-c)*math.sin(a)/a*math.sin(b)/b\n\ndef f_k(k,R): # the Fourier transform of a spherical top-hat with radius R\n y=R*k\n return 3/y**3*(math.sin(y)-y*math.cos(y))\n\nclass Cosmology:\n \"\"\"This class computes various cosmological quantities like comoving,\n angular diameter, luminosity distance, lookback time etc.. Distance\n definitions are from Hogg 1999, astro-ph/9905116.\n \"\"\"\n \n def __init__(self, omega_m=0.27, omega_l=0.73, h=0.7, Gamma=0.2, n_s=1.0, sigma_8=0.81):\n self.omega_m = omega_m\n self.omega_l = omega_l\n self.omega_k = 1. - self.omega_m - self.omega_l\n self.h = h\n self.c = 2.99792458E8 # speed of light in m/s\n self.pc = 3.085678E16 # parsec in metres\n self.G = 6.673E-11 # Gravitational constant\n self.M_sun = 1.98892E30 # solar mass in kg\n self.H_0 = self.h * 100. * 1.E3 / 1.E6 / self.pc # Hubble constant in SI units\n self.dh = 3000./self.h # Hubble distance (Hogg eq. 4) in Mpc.\n self.th = 9.78e9/self.h # Hubble time in years\n self.th_sec = 3.09e17/self.h # Hubble time in seconds\n self.Gamma=Gamma # should be calculated by gamma=omega_m*h*exp(-omega_b*(1 + sqrt(2*h)/omega_m))\n self.n_s=n_s\n self.sigma_8=sigma_8\n self.norm_int=1/(2*math.pi)**3 * 4*math.pi * scipy.integrate.quad(lambda k: k**2*self.P_L(k)*f_k(k,8.0)**2, 0, scipy.Inf)[0]\n self.A=self.sigma_8**2/self.norm_int\n self.ro_0=2.77786E11 # critical density in M_sun/Mpc**3\n self.dlnsigma_dlnM=(math.log(self.sigma_M(10.**15))-math.log(self.sigma_M(10.**5)))/(math.log(15)-math.log(5))\n return\n \n\n def Ez(self, z):\n \"\"\"E(z) function of Hogg's equation 14\"\"\"\n e = math.sqrt(self.omega_m*(1+z)**3 + self.omega_k*(1+z)**2 \\\n + self.omega_l)\n return e\n \n\n def ooEz(self, z):\n \"\"\"Returns 1/E(z), E(z) being Hogg's eq. 14.\"\"\"\n return 1./self.Ez(z)\n\n\n def ooEzopz(self, z):\n \"\"\"Returns 1/(E(z)*(1+z)), E(z) being Hogg's eq. 
14.\"\"\"\n return 1./(self.Ez(z)*(1+z))\n\n \n def dcom_los(self, z1, z2):\n \"\"\"Returns the line of sight comoving distance between objects at\n redshifts z1 and z2, z2>z1. Value is in Mpc/h\"\"\"\n if z1>=z2:\n print(\"z2 must be greater than z1\")\n return -1\n dclos = self.dh * scipy.integrate.quad(self.ooEz, z1, z2)[0]\n return dclos\n\n def dcom_tra(self, z1, z2):\n \"\"\"Returns the transverse comoving distance (proper motion distance)\n between objects at redshift z1 and z2.\"\"\"\n dcl = self.dcom_los(z1, z2)\n if self.omega_k == 0.0:\n dct = dcl\n elif self.omega_k > 0:\n dct = self.dh / math.sqrt(self.omega_k) \\\n * math.sinh(math.sqrt(self.omega_k)*dcl/self.dh)\n else:\n dct = self.dh / math.sqrt(math.fabs(self.omega_k)) \\\n * math.sin(math.sqrt(math.fabs(self.omega_k))*dcl/self.dh)\n return dct\n\n\n def dang(self, z1, z2):\n \"\"\"Returns the angular diameter distance between objects at\n redshift z1 and z2.\"\"\"\n dct = self.dcom_tra(z1, z2)\n return dct/(1+z2)\n\n\n def dlum(self, z1, z2):\n \"\"\"Returns the luminosity distance between objects at\n redshift z1 and z2.\n\n WARNING! WARNING! \n This function is untested for z1>0!\n WARNING! WARNING! \n \"\"\"\n dct = self.dcom_tra(z1, z2)\n return (1+z2)/(1+z1) * dct\n\n\n def covol(self, z):\n \"\"\"Returns the comoving volume element d V_c in a solid angle\n d Omaga at redshift z.\"\"\"\n da = self.dang(0, z)\n return self.dh * (1+z)**2 * da**2 / self.Ez(z)\n\n def tlook(self, z):\n \"\"\"This function returns the lookback time in units of the\n Hubble time. The Hubble time can be accessed as the attributes\n th (in years) or th_sec (in seconds).\"\"\"\n tl = scipy.integrate.quad(self.ooEzopz, 0, z)[0]\n return tl\n\n def DM(self, z1, z2):\n \"\"\"Returns the distance modulus between objects at\n redshift z1 and z2.\n \"\"\"\n x=self.dlum(z1,z2)\n return 5*math.log(x/1.e-5)/math.log(10)\n\n def rho_crit(self, z1):\n \"\"\"Returns the critical density at z1 in SI units.\n \"\"\"\n return 3*(self.Ez(z1)*self.H_0)**2/(8*math.pi*self.G)\n\n def Sigma_crit(self, z1, z2):\n \"\"\"Returns the critical surface mass density for lenses at z1 and sources at z2 in SI units.\n \"\"\"\n return self.c**2/(4*math.pi*self.G)*self.dang(0.,z2)/(self.dang(0.,z1)*self.dang(z1,z2))/(1.E6*self.pc)*self.h\n\n ########## Power spectrum and mass function #############\n\n def T_k(self, k): # the Transfer function\n q=k/self.Gamma\n T=math.log(1+2.34*q)/(2.34*q)*(1+3.89*q+(16.1*q)**2+(5.46*q)**3+(6.71*q)**4)**(-0.25)\n return T\n\n def H_sqd(self, a1): # the Hubble parameter\n H=(100.*self.h)**2*(self.omega_m/(a1**3)+self.omega_l)\n return H\n\n def D_plus(self, a2): # the growth factor\n def func(x):\n return 1/(self.omega_m/x+self.omega_l*x**2)**1.5\n integral=scipy.integrate.quad(func,0,a2)\n integral_0=scipy.integrate.quad(func,0,1)\n D_a=math.sqrt(self.H_sqd(a2))/100.*integral[0]\n D_0=math.sqrt(self.H_sqd(1))/100.*integral_0[0]\n return D_a/D_0\n \n def D_plus2(self, a2): # the growth factor\n om = self.omega_m/(a2+self.omega_m*(1.-a2)+self.omega_l*a2*(a2*a2-1.))\n ol = self.omega_l*a2*a2*a2/(a2+self.omega_m*(1.-a2)+self.omega_l*a2*(a2*a2-1.))\n g1 = 5./2.*self.omega_m/(self.omega_m**(4./7.)-self.omega_l+(1+self.omega_m/2.0)*(1.0+self.omega_l/70.0))\n g = 5./2.*om/(om**(4./7.)-ol+(1+om/2.0)*(1.0+ol/70.0))\n return a2*g/g1\n \n def P_L(self, k): # the linear CDM power spectrum\n P=self.T_k(k)**2*k**self.n_s\n return P\n\n def P_L_norm(self, k): # the normalised, linear CDM power spectrum\n P=self.A*self.T_k(k)**2*k**self.n_s\n return 
P\n\n def P_L_norm_z(self, k, z): # the normalised, linear CDM power spectrum\n P=self.A*self.T_k(k)**2*k**self.n_s*self.D_plus(1/(1+z))\n return P\n\n def d_ln_P_L_norm(self, k): # derivative of the normalised, linear CDM power spectrum\n P=(math.log(self.P_L_norm(k+k/1000.))-math.log(self.P_L_norm(k-k/1000.)))/(math.log(k+k/1000.)-math.log(k-k/1000.))\n return P\n\n def d_ln_P_L_norm_z(self, k,z): # derivative of the normalised, linear CDM power spectrum\n P=(math.log(self.P_L_norm_z(k+k/1000.,z))-math.log(self.P_L_norm_z(k-k/1000.,z)))/(math.log(k+k/1000.)-math.log(k-k/1000.))\n return P\n\n def Delta_sq_L_norm(self, k): # the normalised, linear, dimensionless CDM power spectrum\n P=self.A*self.T_k(k)**2*k**self.n_s*k**3/(2*math.pi**2)\n return P\n\n def Delta_sq_L_norm_z(self, k,z): # the normalised, linear, dimensionless CDM power spectrum\n P=self.A*self.T_k(k)**2*k**self.n_s*k**3/(2*math.pi**2)*self.D_plus(1/(1+z))\n return P\n\n def sigma_M(self, M):\n def func(k,R):\n return k**2*self.P_L_norm(k)*f_k(k,R)\n R=(M/self.ro_0*3/4/math.pi)**(1/3.)\n integrand=scipy.integrate.quad(func, 0, scipy.Inf, args=(R), limit=50000)[0]\n return R #1/(2*math.pi**2)*integrand\n\n def Jenkins(self, M):\n return 0.315*self.ro_0/M**2*self.dlnsigma_dlnM*math.exp(-math.sqrt((0.61-math.log(self.sigma_M(M)))**2)**3.8)\n\n def f96(self, x, n_eff): # Peacock and Dodds 1996 fitting formula\n A_c=0.482*(1.+n_eff/3.)**(-0.947)\n B_c=0.226*(1.+n_eff/3.)**(-1.778)\n alpha_c=3.310*(1.+n_eff/3.)**(-0.244)\n beta_c=0.862*(1.+n_eff/3.)**(-0.287)\n V_c=11.55*(1.+n_eff/3.)**(-0.423)\n g=5./2.*self.omega_m*(self.omega_m**(4./7.)-self.omega_l+(1+self.omega_m/2)*(1+self.omega_l/70))**(-1)\n return x*((1+B_c*beta_c*x+(A_c*x)**(alpha_c*beta_c))/(1+((A_c*x)**alpha_c*g**3/(V_c*x**0.5))**beta_c))**(1/beta_c)\n\n def Delta_sq_NL_PD96_norm(self, k_L): # the normalised, non-linear, dimensionless CDM power spectrum from Peacock and Dodds 1996\n n_eff=self.d_ln_P_L_norm(k_L/2.)\n return self.f96(self.Delta_sq_L_norm(k_L), n_eff)\n\n def Delta_sq_NL_PD96_norm_z(self, k_L,z): # the normalised, non-linear, dimensionless CDM power spectrum from Peacock and Dodds 1996\n n_eff=self.d_ln_P_L_norm_z(k_L/2.,z)\n return self.f96(self.Delta_sq_L_norm_z(k_L,z), n_eff)\n\n def P_NL_PD96_norm(self, k): # the normalised, non-linear CDM power spectrum from Peacock and Dodds 1996\n return self.Delta_sq_NL_PD96_norm(k)*((k/self.k_L_over_k_NL_PD96(self.Delta_sq_NL_PD96_norm(k)))**3/(2*math.pi**2))**(-1)\n\n def P_NL_PD96_norm_z(self, k, z): # the normalised, non-linear CDM power spectrum from Peacock and Dodds 1996\n return self.Delta_sq_NL_PD96_norm_z(k,z)*((k/self.k_L_over_k_NL_PD96(self.Delta_sq_NL_PD96_norm_z(k,z)))**3/(2*math.pi**2))**(-1)\n\n def k_L_over_k_NL_PD96(self, Delta):\n return (1+Delta)**(-1./3.)\n\n", "import numpy as np\nfilename=\"Summary_multiplicative_Fid_unblinded.npy\"\nm=np.load(filename)[:,1]" ]
[ [ "numpy.issubdtype" ], [ "scipy.integrate.quad" ], [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cropsinsilico/yggdrasil
[ "466a4f77605a6f461d57ef7b165a6db7eec4d1fd", "466a4f77605a6f461d57ef7b165a6db7eec4d1fd" ]
[ "scripts/test_wofost.py", "yggdrasil/serialize/PandasSerialize.py" ]
[ "import numpy as np\nimport pprint\nfrom yggdrasil import units\nfrom yggdrasil.communication.NetCDFFileComm import NetCDFFileComm\nfrom yggdrasil.serialize.WOFOSTParamSerialize import WOFOSTParamSerialize\n\nfname_param = 'cropfile_example.cab'\n# fname_netcdf = 'simple.nc'\nfname_netcdf = 'test.nc'\n\n# Test serializer\nwith open(fname_param, 'rb') as fd:\n contents = fd.read()\ninst = WOFOSTParamSerialize()\nx = inst.deserialize(contents)\n\n\n# Test comm\ndata = {\n 'time': units.add_units(np.arange(10).astype('float32'), 's'),\n 'x': np.array(['a', 'hello', 'c'], 'S5'),\n 'space': units.add_units(np.ones((5, 5), 'int64'), 'mol')}\n# out_file = NetCDFFileComm('test_send', address=fname_netcdf, direction='send')\n# assert(out_file.send(data))\n\nin_file = NetCDFFileComm('test_recv', address=fname_netcdf, direction='recv')\nflag, data_recv = in_file.recv()\nassert(flag)\nassert(data_recv == data)\n\npprint.pprint(data)\n\nwith open(fname_netcdf, 'rb') as fd:\n print(fd.read())\n", "import pandas\nimport copy\nimport numpy as np\nimport warnings\nimport io as sio\nfrom yggdrasil import platform, serialize\nfrom yggdrasil.metaschema.datatypes.JSONArrayMetaschemaType import (\n JSONArrayMetaschemaType)\nfrom yggdrasil.serialize.AsciiTableSerialize import AsciiTableSerialize\nfrom yggdrasil.communication.transforms.PandasTransform import PandasTransform\n\n\nclass PandasSerialize(AsciiTableSerialize):\n r\"\"\"Class for serializing/deserializing Pandas data frames.\n\n Args:\n no_header (bool, optional): If True, headers will not be read or\n serialized from/to tables. Defaults to False.\n str_as_bytes (bool, optional): If True, strings in columns are\n read as bytes. Defaults to False.\n\n \"\"\"\n\n _seritype = 'pandas'\n _schema_subtype_description = ('Serializes tables using the pandas package.')\n _schema_properties = {'no_header': {'type': 'boolean',\n 'default': False},\n 'str_as_bytes': {'type': 'boolean',\n 'default': False}}\n _schema_excluded_from_inherit = ['as_array']\n default_read_meth = 'read'\n as_array = True\n concats_as_str = False\n # has_header = False\n\n def __init__(self, *args, **kwargs):\n self.write_header_once = False\n self.dont_write_header = kwargs.pop('dont_write_header',\n kwargs.get('no_header', False))\n return super(PandasSerialize, self).__init__(*args, **kwargs)\n\n @property\n def empty_msg(self):\n r\"\"\"obj: Object indicating empty message.\"\"\"\n if self.numpy_dtype:\n return pandas.DataFrame(np.zeros(0, self.numpy_dtype))\n else:\n return pandas.DataFrame(columns=self.get_field_names())\n\n def get_field_names(self, *args, **kwargs):\n r\"\"\"Get the field names for an array of fields.\n\n Args:\n *args: Arguments are passed to the parent class's method.\n **kwargs: Keyword arguments are passed to the parent class's\n method.\n\n Returns:\n list: Names for each field in the data type.\n\n \"\"\"\n if self.no_header:\n return None\n return super(PandasSerialize, self).get_field_names(*args, **kwargs)\n \n @classmethod\n def apply_field_names(cls, frame, field_names=None):\n r\"\"\"Apply field names as columns to a frame, first checking for a mapping.\n If there is a direct mapping, the columns are reordered to match the order\n of the field names. If there is not an overlap in the field names and\n columns, a one-to-one mapping is assumed, but a warning is issued. 
If there\n is a partial overlap, an error is raised.\n\n Args:\n frame (pandas.DataFrame): Frame to apply field names to as columns.\n field_names (list, optional): New field names that should be applied.\n If not provided, the original frame will be returned unaltered.\n\n Returns:\n pandas.DataFrame: Frame with updated field names.\n\n Raises:\n RuntimeError: If there is a partial overlap between the field names\n and columns.\n\n \"\"\"\n if field_names is None:\n return frame\n cols = frame.columns.tolist()\n if len(field_names) != len(cols):\n raise RuntimeError((\"Number of field names (%d) doesn't match \"\n + \"number of columns in data frame (%d).\")\n % (len(field_names), len(cols)))\n # Check for missing fields\n fmiss = []\n for f in field_names:\n if f not in cols:\n fmiss.append(f)\n if fmiss:\n if len(fmiss) == len(field_names):\n warnings.warn(\"Assuming direct mapping of field names to columns. \"\n + \"This may not be correct.\")\n frame.columns = field_names\n else:\n # Partial overlap\n raise RuntimeError(\"%d fields (%s) missing from frame: %s\"\n % (len(fmiss), str(fmiss), str(frame)))\n else:\n # Reorder columns\n frame = frame[field_names]\n return frame\n\n def cformat2nptype(self, *args, **kwargs):\n r\"\"\"Method to convert c format string to numpy data type.\n\n Args:\n *args: Arguments are passed to serialize.cformat2nptype.\n **kwargs: Keyword arguments are passed to serialize.cformat2nptype.\n\n Returns:\n np.dtype: Corresponding numpy data type.\n\n \"\"\"\n out = super(PandasSerialize, self).cformat2nptype(*args, **kwargs)\n if (out.char == 'S') and (not self.str_as_bytes):\n out = np.dtype('U%d' % out.itemsize)\n return out\n \n def func_serialize(self, args):\n r\"\"\"Serialize a message.\n\n Args:\n args (obj): Python object to be serialized.\n\n Returns:\n bytes, str: Serialized message.\n\n \"\"\"\n if not isinstance(args, pandas.DataFrame):\n raise TypeError((\"Pandas DataFrame required. 
Invalid type \"\n + \"of '%s' provided.\") % type(args))\n fd = sio.StringIO()\n # For Python 3 and higher, bytes need to be encoded\n args_ = copy.deepcopy(args)\n for c in args.columns:\n if isinstance(args_[c][0], bytes):\n args_[c] = args_[c].apply(lambda s: s.decode('utf-8'))\n if (self.field_names is None) and (not self.no_header):\n self.field_names = self.get_field_names()\n args_ = self.apply_field_names(args_, self.field_names)\n if not self.no_header:\n cols = args_.columns.tolist()\n if cols == list(range(len(cols))):\n args_ = self.apply_field_names(args_, ['f%d' % i for i in\n range(len(cols))])\n args_.to_csv(fd, index=False,\n # Not in pandas <0.24\n # line_terminator=self.newline.decode(\"utf-8\"),\n sep=self.delimiter.decode(\"utf-8\"),\n mode='w', encoding='utf8',\n header=(not self.dont_write_header))\n if self.write_header_once:\n self.dont_write_header = True\n out = fd.getvalue()\n fd.close()\n # Required to change out \\r\\n for \\n on windows\n out = out.encode(\"utf-8\")\n out = out.replace(platform._newline, self.newline)\n return out\n\n def func_deserialize(self, msg):\n r\"\"\"Deserialize a message.\n\n Args:\n msg (str, bytes): Message to be deserialized.\n\n Returns:\n obj: Deserialized Python object.\n\n \"\"\"\n fd = sio.BytesIO(msg)\n names = None\n dtype = None\n if self.initialized:\n np_dtype = self.numpy_dtype\n dtype = {}\n if self.no_header:\n dtype_names = range(len(np_dtype.names))\n else:\n dtype_names = np_dtype.names\n for n in dtype_names:\n if np_dtype[n].char == 'U':\n dtype[n] = object\n else:\n dtype[n] = np_dtype[n]\n kws = dict(sep=self.delimiter.decode(\"utf-8\"),\n names=names,\n dtype=dtype,\n encoding='utf8',\n skipinitialspace=True)\n if self.no_header:\n kws['header'] = None\n out = pandas.read_csv(fd, **kws)\n out = out.dropna(axis='columns', how='all')\n fd.close()\n if self.str_as_bytes:\n # Make sure strings are bytes\n for c, d in zip(out.columns, out.dtypes):\n if (d == object) and isinstance(out[c][0], str):\n out[c] = out[c].apply(lambda s: s.encode('utf-8'))\n # On windows, long != longlong and longlong requires special cformat\n # For now, long will be used to preserve the use of %ld to match long\n if platform._is_win: # pragma: windows\n if np.dtype('longlong').itemsize == 8:\n new_dtypes = dict()\n for c, d in zip(out.columns, out.dtypes):\n if d == np.dtype('longlong'):\n new_dtypes[c] = np.int32\n else:\n new_dtypes[c] = d\n out = out.astype(new_dtypes, copy=False)\n # Reorder if necessary\n out = self.apply_field_names(out, self.get_field_names())\n if dtype is not None:\n out = out.astype(dtype, copy=False)\n if (self.field_names is None) and (not self.no_header):\n self.field_names = out.columns.tolist()\n if not self.initialized:\n typedef = JSONArrayMetaschemaType.encode_type(out)\n self.update_serializer(extract=True, **typedef)\n return out\n\n @property\n def send_converter(self):\n kws = {}\n field_names = self.get_field_names()\n if field_names is not None:\n kws['field_names'] = field_names\n return PandasTransform(**kws)\n\n @classmethod\n def object2dict(cls, obj, **kwargs):\n r\"\"\"Convert a message object into a dictionary.\n\n Args:\n obj (object): Object that would be serialized by this class and\n should be returned in a dictionary form.\n **kwargs: Additional keyword arguments are ignored.\n\n Returns:\n dict: Dictionary version of the provided object.\n\n \"\"\"\n if isinstance(obj, pandas.DataFrame):\n return serialize.pandas2dict(obj)\n return super(PandasSerialize, cls).object2dict(obj, 
as_array=True,\n **kwargs)\n\n @classmethod\n def object2array(cls, obj, **kwargs):\n r\"\"\"Convert a message object into an array.\n\n Args:\n obj (object): Object that would be serialized by this class and\n should be returned in an array form.\n **kwargs: Additional keyword arguments are ignored.\n\n Returns:\n np.array: Array version of the provided object.\n\n \"\"\"\n if isinstance(obj, pandas.DataFrame):\n return serialize.pandas2numpy(obj)\n return super(PandasSerialize, cls).object2array(obj, as_array=True,\n **kwargs)\n\n @classmethod\n def concatenate(cls, objects, **kwargs):\n r\"\"\"Concatenate objects to get object that would be recieved if\n the concatenated serialization were deserialized.\n\n Args:\n objects (list): Objects to be concatenated.\n **kwargs: Additional keyword arguments are ignored.\n\n Returns:\n list: Set of objects that results from concatenating those provided.\n\n \"\"\"\n if len(objects) == 0:\n return []\n if isinstance(objects[0], pandas.DataFrame):\n field_names = objects[0].columns.tolist()\n for i in range(1, len(objects)):\n objects[i] = cls.apply_field_names(objects[i],\n field_names)\n return [pandas.concat(objects, ignore_index=True)]\n out = super(PandasSerialize, cls).concatenate(objects, as_array=True,\n **kwargs)\n return out\n \n def consolidate_array(self, out):\n r\"\"\"Consolidate message into a structure numpy array if possible.\n\n Args:\n out (list, tuple, np.ndarray): Object to consolidate into a\n structured numpy array.\n\n Returns:\n np.ndarray: Structured numpy array containing consolidated message.\n\n Raises:\n ValueError: If the array cannot be consolidated.\n\n \"\"\"\n if isinstance(out, pandas.DataFrame):\n out = serialize.pandas2numpy(out)\n return super(PandasSerialize, self).consolidate_array(out)\n \n @classmethod\n def get_testing_options(cls, not_as_frames=False, no_names=False,\n no_header=False, **kwargs):\n r\"\"\"Method to return a dictionary of testing options for this class.\n\n Args:\n not_as_frames (bool, optional): If True, the returned example\n includes data that is not in a pandas data frame. Defaults to\n False.\n no_names (bool, optional): If True, an example is returned where the\n names are not provided to the deserializer. Defaults to False.\n no_header (bool, optional): If True, an example is returned\n where a header is not included. 
Defaults to False.\n\n Returns:\n dict: Dictionary of variables to use for testing.\n\n \"\"\"\n kwargs.setdefault('table_string_type', 'string')\n field_names = None\n out = super(PandasSerialize, cls).get_testing_options(array_columns=True,\n **kwargs)\n if kwargs['table_string_type'] == 'bytes':\n out['kwargs']['str_as_bytes'] = True\n for k in ['as_array']: # , 'format_str']:\n if k in out['kwargs']:\n del out['kwargs'][k]\n out['extra_kwargs'] = {}\n if no_names:\n for x in [out['kwargs'], out]:\n if 'field_names' in x:\n del x['field_names']\n header_line = b'f0\\tf1\\tf2\\n'\n elif no_header:\n for x in [out['kwargs'], out]:\n if 'field_names' in x:\n del x['field_names']\n header_line = b''\n out['kwargs']['no_header'] = True\n for x in out['typedef']['items']:\n x.pop('title', None)\n else:\n if 'field_names' in out['kwargs']:\n field_names = out['kwargs']['field_names']\n header_line = b'name\\tcount\\tsize\\n'\n out['contents'] = (header_line\n + b'one\\t1\\t1.0\\n'\n + b'two\\t2\\t2.0\\n'\n + b'three\\t3\\t3.0\\n'\n + b'one\\t1\\t1.0\\n'\n + b'two\\t2\\t2.0\\n'\n + b'three\\t3\\t3.0\\n')\n out['concatenate'] = [([], [])]\n if not_as_frames:\n pass\n elif no_header:\n out['objects'] = [serialize.list2pandas(x) for x in out['objects']]\n out['dtype'] = np.dtype(','.join([x[1] for x in out['dtype'].descr]))\n else:\n if field_names is None:\n field_names = ['f0', 'f1', 'f2']\n out['objects'] = [serialize.list2pandas(x, names=field_names)\n for x in out['objects']]\n out['kwargs']['datatype'] = copy.deepcopy(out['typedef'])\n if no_names:\n for x in out['kwargs']['datatype']['items']:\n x.pop('title', None)\n out['empty'] = pandas.DataFrame(np.zeros(0, out['dtype']))\n return out\n\n def enable_file_header(self):\n r\"\"\"Set serializer attributes to enable file headers to be included in\n the serializations.\"\"\"\n self.dont_write_header = False\n self.write_header_once = True\n\n def disable_file_header(self):\n r\"\"\"Set serializer attributes to disable file headers from being\n included in the serializations.\"\"\"\n self.dont_write_header = True\n self.write_header_once = True\n \n def serialize_file_header(self):\n r\"\"\"Return the serialized header information that should be prepended\n to files serialized using this class.\n\n Returns:\n bytes: Header string that should be written to the file.\n\n \"\"\"\n return b''\n\n def deserialize_file_header(self, fd):\n r\"\"\"Deserialize the header information from the file and update the\n serializer.\n\n Args:\n fd (file): File containing header.\n\n \"\"\"\n pass\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.ones" ], [ "pandas.concat", "pandas.read_csv", "numpy.zeros", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
shawinmihail/binder_test
[ "e1ef050c0631a80d8c23bdcdc25b472ef1e855b8" ]
[ "utils.py" ]
[ "\nimport math\nimport numpy as np\nfrom consts import Consts\n\ndef get_SSO_inclination(a, ecc):\n # The formula for the RAAN drift is taken from D.A. Vallado Fundamentals of\n # Astrodynamics, page 649, eq 9-37\n\n # Inputs: constants, a - semi-major axis [m] and inclination\n # Outputs: SSO orbit inclination [rad]\n\n p = a * (1 - ecc ** 2)\n n = math.sqrt(Consts.muEarth / a ** 3)\n\n inclination = math.acos(-(Consts.EarthMeanMotion * 2 * p ** 2) / (3 * n * Consts.rEarth ** 2 * Consts.J2))\n return inclination\n\n\ndef mean2ecc(M, e):\n # Converts mean anomaly to eccentric anomaly.\n #\n # Inputs:\n # * mean anomaly\n # * eccentricity\n # Output:\n # * eccentric anomaly\n\n E = 0\n # initial guess\n if ((M > -math.pi) and (M < 0)) or (M > math.pi):\n E = M - e\n else:\n E = M + e\n\n # iteration\n tol = 1e-12\n d = -(E - e*math.sin(E) - M)/(1 - e*math.cos(E))\n while abs(d) >= tol:\n E = E + d\n d = -(E - e*math.sin(E) - M)/(1 - e*math.cos(E))\n return E\n\n\ndef oe2rv(oe):\n # Converts orbital elements (units: radians) to ECI state\n #\n # Input:\n # * orbital elements\n # * gravitational parameter\n # * time math.since given mean anomaly [days]\n # Output:\n # * ECI state [km, km/s], column vector\n\n sma = oe[0] # km\n ecc = oe[1] # -\n inc = oe[2] # rad\n RAAN = oe[3] # rad\n AOP = oe[4] # rad\n MA = oe[5] # rad\n\n E = mean2ecc(MA, ecc)\n v = 2 * math.atan(math.sqrt((1 + ecc) / (1 - ecc)) * math.tan(E / 2))\n r = sma * (1 - ecc ** 2) / (1 + ecc * math.cos(v))\n\n r_pqw = r * np.array([math.cos(v), math.sin(v), 0])\n v_pqw = math.sqrt(Consts.muEarth / (sma * (1 - ecc ** 2))) * np.array([-math.sin(v), ecc + math.cos(v), 0])\n\n Rz_O = np.array([[math.cos(RAAN), -math.sin(RAAN), 0], [math.sin(RAAN), math.cos(RAAN), 0], [0, 0, 1]])\n Rx_i = np.array([[1, 0, 0], [0, math.cos(inc), -math.sin(inc)], [0, math.sin(inc), math.cos(inc)]])\n Rz_w = [[math.cos(AOP), -math.sin(AOP), 0], [math.sin(AOP), math.cos(AOP), 0], [0, 0, 1]]\n R = np.matmul(np.matmul(Rz_O, Rx_i), Rz_w)\n\n r_ijk = np.transpose(np.matmul(R, np.transpose(r_pqw)))\n v_ijk = np.transpose(np.matmul(R, v_pqw))\n\n sv = np.concatenate((np.transpose(r_ijk), (np.transpose(v_ijk))))\n return sv\n\n\ndef central_gravity_motion(rv, t):\n\n rv_prime = np.array([rv[3], rv[4], rv[5],\n -Consts.muEarth * rv[0] / (rv[0] ** 2 + rv[1] ** 2 + rv[2] ** 2) ** (3 / 2),\n -Consts.muEarth * rv[1] / (rv[0] ** 2 + rv[1] ** 2 + rv[2] ** 2) ** (3 / 2),\n -Consts.muEarth * rv[2] / (rv[0] ** 2 + rv[1] ** 2 + rv[2] ** 2) ** (3 / 2)])\n\n return rv_prime\n\n\ndef orb_2_eci(rv0, rv, n):\n # x - along track; y - out-of-plane; z - radial\n z_orb = rv0[0:3] / np.linalg.norm(rv0[0:3])\n y_orb = np.cross(rv0[0:3], rv0[3:6])\n y_orb = y_orb / np.linalg.norm(y_orb)\n x_orb = np.cross(y_orb, z_orb)\n\n M = np.column_stack((x_orb, y_orb, z_orb))\n de_eci = np.matmul(M, rv[0:3])\n r_eci = rv0[0:3] + np.matmul(M, rv[0:3])\n v_eci = rv0[3:6] + np.matmul(M, (rv[3:6] + np.cross(np.array([0, n, 0]), rv[0:3])))\n rv_eci = np.concatenate((r_eci, v_eci))\n return rv_eci\n\n\ndef hyll_traj(t, n, A, B, C, D, E):\n nu = n * t\n x = 2 * A * np.cos(nu) - 2 * B * np.sin(nu) + C\n y = D * np.sin(nu) + E * np.cos(nu)\n z = A * np.sin(nu) + B * np.cos(nu)\n\n vx = n * -2 * A * np.sin(nu) - n * 2 * B * np.cos(nu)\n vy = n * D * np.cos(nu) - n * E * np.sin(nu)\n vz = n * A * np.cos(nu) - n * B * np.sin(nu)\n\n return np.column_stack((x, y, z, vx, vy, vz))\n\n\ndef tetrahedron_configuration_1(K, a, b, c):\n A3 = K * np.array([1, -0.5, -0.5])\n B3 = K * np.array([0, -math.sqrt(3)/2, 
math.sqrt(3)/2])\n C3 = c * np.array([1, 1, 1])\n D3 = a * K * np.array([1, -0.5, -0.5]) + b * K * np.array([0, -math.sqrt(3)/2, math.sqrt(3)/2])\n E3 = -b * K * np.array([1, -0.5, -0.5]) + a * K * np.array([0, -math.sqrt(3)/2, math.sqrt(3)/2])\n return A3, B3, C3, D3, E3\n\n\ndef tetrahedron_volume(r1, r2, r3):\n M = np.column_stack((r1,r2, r3))\n V = 1/6 * np.linalg.det(M)\n return V\n\n\ndef tetrahedron_square_length(r1, r2, r3):\n d1 = r2 - r1\n d2 = r3 - r2\n d3 = r1 - r3\n L = np.dot(r1, r1) + np.dot(r2, r2) + np.dot(r3, r3) + np.dot(d1, d1) + np.dot(d2, d2) + np.dot(d3, d3)\n return L\n\n\ndef tetrahedron_quality(r1, r2, r3):\n V = tetrahedron_volume(r1, r2, r3)\n L = tetrahedron_square_length(r1, r2, r3)\n if(V < 0):\n return 0\n Q = 12 * (3 * V) ** (2/3) / L\n return Q\n\n\n\n" ]
[ [ "numpy.cross", "numpy.dot", "numpy.matmul", "numpy.linalg.norm", "numpy.transpose", "numpy.sin", "numpy.concatenate", "numpy.linalg.det", "numpy.cos", "numpy.column_stack", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dahaj1897/sierpinski-triangle
[ "cea74c425e93e8829c574b5f9665b3c856fc5e58" ]
[ "sierpinski-triangle.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 20 16:13:54 2019\n\n@author: sazd3\n\"\"\"\nfrom __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx0=[0.5,-.5,0]\ny0=[0,0,np.sqrt(5)/4]\nx=x0\ny=y0\nnewx=(x[0]+x[1])/2\nnewy=(y[0]+y[1])/2\nn=100000 #number of points\nfor i in range(n):\n p1=np.random.randint(0,3)\n print(p1,x0[p1],y0[p1])\n x.append((x0[p1]+newx)/2)\n y.append((y0[p1]+newy)/2)\n newx=x[-1]\n newy=y[-1]\n \nplt.scatter(x,y,s=.2)\n \n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.scatter", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marioantolienzh/Cardiac-Rearrests-Prediction-Using-HRV-and-ML-Algorithms
[ "76b757baf8503c5fc6337b1d33871e04c87c37ed" ]
[ "machine_learning.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 5 14:11:00 2021\n\n@author: laros\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nsns.set(color_codes=True)\nfrom scipy import stats\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict\nfrom sklearn import metrics\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import tree\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\ndata = pd.read_csv('results1.csv')\ndata1=pd.DataFrame(data)\ndata1[[\"bpm min\", \"bpm max\"]] = data1[[\"bpm min\", \"bpm max\"]].apply(pd.to_numeric)\ndata1[[\"bpm std\", \"sdnn\",\"RMSSD\",\"sdsd\",\"nn50\",\"pnn50\",\"tinn n\",\"tinn m\",\"tinn\"]] = data1[[\"bpm std\", \"sdnn\",\"RMSSD\",\"sdsd\",\"nn50\",\"pnn50\",\"tinn n\",\"tinn m\",\"tinn\"]].apply(pd.to_numeric)\ndata1[[\"tri index\", \"VLF peak\",\"LF peak\",\"HF peak\",\"VLF power\",\"LF power\",\"HF power\",\"fft ratio\"]] = data1[[\"tri index\", \"VLF peak\",\"LF peak\",\"HF peak\",\"VLF power\",\"LF power\",\"HF power\",\"fft ratio\"]].apply(pd.to_numeric)\ndata1[[\"sd1\", \"sd2\",\"sd ratio\",\"ellipse area\",\"Sample entropy\"]] = data1[[\"sd1\", \"sd2\",\"sd ratio\",\"ellipse area\",\"Sample entropy\"]].apply(pd.to_numeric)\n#data1[[\"Type\"]]= data1[[\"Type\"]].apply(pd.to_numeric)\n\ndel data1['ID']\ndel data1['nni counter (sample size)']\n\n#print(data1.info())\n# print(data1.shape)\n\n#Check duplicate rows in data\n# duplicate_rows = data1[data1.duplicated()]\n# print(\"Number of duplicate rows :: \", duplicate_rows.shape)\n\n#remove null values \ndata2 = data1.dropna()\n\n# #Looking for null values\n# print(\"Null values :: \")\n# print(data2.isnull() .sum())\n\n# plt.figure(1)\n# # plt.scatter(data2[\"Sample entropy\"],data2[\"Type\"])\n# # plt.xlabel(\"sdnn\")\n# sns.pairplot(data2, vars= ['sdnn', 'RMSSD','sdsd','pnn50'],hue=(\"Type\"))\n\n# plt.figure(2)\n# sns.pairplot(data2, vars= ['VLF power', 'LF power','HF power'],hue=(\"Type\"))\n\n# plt.figure(3)\n# sns.pairplot(data2, vars= ['sd1', 'sd2','ellipse area'],hue=(\"Type\"))\n\n#correlation\nplt.figure(5)\npearcor = data2.corr(method='pearson')\nspearcor = data2.corr(method='spearman')\ncmap=sns.diverging_palette(20, 220, n=200)\n# cmap = sns.diverging_palette(0,250,150,50,as_cmap=True)\nsns.heatmap(pearcor, vmin=-1, vmax=1, cmap=cmap, linewidth=0.1)\nplt.title(\"Pearson Correlation\")\n\n\nplt.figure(6)\nsns.heatmap(spearcor, vmin=-1, vmax=1, cmap=cmap, linewidth=0.1)\nplt.title(\"Spearman Correlation\")\n\n#machine learning\nx = data2.drop(\"Type\",axis=1)\ny = data2[\"Type\"]\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3)\n\n#Logistic regression\nlogreg = LogisticRegression(random_state=0,solver='liblinear')\nlogreg.fit(x_train,y_train)\ny_pred_logreg = logreg.predict(x_test)\nprint(\"Accuracy of 1st log reg::\" , metrics.accuracy_score(y_test,y_pred_logreg))\n\ndata3 = data2[[\"Type\",\"sdnn\",\"RMSSD\",\"sdsd\",\"VLF power\",\"LF power\",\"HF power\",\"sd1\",\"sd2\",\"ellipse area\"]]\n# print(data3.info())\n\n#machine learning\nx1 = data3.drop(\"Type\",axis=1)\ny1 = data3[\"Type\"]\nx1_train,x1_test,y1_train,y1_test = train_test_split(x1,y1,test_size=0.3)\n\n#Logistic regression\nlogreg = 
LogisticRegression(random_state=0,solver='liblinear')\nlogreg.fit(x1_train,y1_train)\ny1_pred_logreg = logreg.predict(x1_test)\n# score = logreg.score(x1_test, y1_test)\n# print(\"score::\", score)\nprint(\"Accuracy of 2nd log reg::\" , metrics.accuracy_score(y1_test,y1_pred_logreg))\n\n#cross validation\n# scores = cross_val_score(logreg, x1_train, y1_train, cv=10)\n# print('Cross-Validation Accuracy Scores', scores)\n\n# ***********************Decision Tree Classification***********************\n\ndecTree = DecisionTreeClassifier(max_depth=12, random_state=0)\ndecTree.fit(x_train,y_train)\ny2_pred_decTree = decTree.predict(x_test)\nprint(\"Accuracy of Decision Trees :: \" , metrics.accuracy_score(y_test,y2_pred_decTree))\n\n# plt.figure()\n# tree.plot_tree(decTree)\n\n# Using Random forest classifier\nrf = RandomForestClassifier(n_estimators=100)\nrf.fit(x_train,y_train)\ny_pred_rf = rf.predict(x_test)\nprint(\"Accuracy of Random Forest Classifier :: \", metrics.accuracy_score(y_test, y_pred_rf))\n\n\n\nplt.figure(7)\n#Find the score of each feature in model and drop the features with low scores\nf_imp = rf.feature_importances_\nsorted_indices = np.argsort(f_imp)[::-1]\n\nplt.title('Feature Importance based on random forest')\nplt.bar(range(x_train.shape[1]), f_imp[sorted_indices], align='center')\nplt.xticks(range(x_train.shape[1]), x_train.columns[sorted_indices], rotation=90)\nplt.ylabel('Feature Importance score')\n# plt.xlabel('Features')\nplt.tight_layout()\nplt.show()\n\n\n\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.tight_layout", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.title", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.accuracy_score", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.ylabel", "numpy.argsort", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
brencej/ProGED
[ "50c077cab400e186dfe45834b96a257d501c04c9" ]
[ "ProGED/generators/grammar_construction.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom ProGED.generators.grammar import GeneratorGrammar\n\nimport sympy as sp\nfrom diophantine import solve\nfrom itertools import product\n\ndef grammar_from_template (template_name, generator_settings, repeat_limit = 100, depth_limit = 100):\n if template_name in GRAMMAR_LIBRARY:\n grammar_str = GRAMMAR_LIBRARY[template_name](**generator_settings)\n return GeneratorGrammar(grammar_str, repeat_limit = repeat_limit, depth_limit = depth_limit)\n\ndef construct_right (right = \"a\", prob = 1):\n return right + \" [\" + str(prob) + \"]\"\n\ndef construct_production (left = \"S\", items = [\"a\"], probs=[1]):\n if not items:\n return \"\"\n else:\n return \"\\n\" + left + \" -> \" + construct_right_distribution (items=items, probs=probs)\n\ndef construct_right_distribution (items=[], probs=[]):\n p = np.array(probs)/np.sum(probs)\n S = construct_right(right=items[0], prob=p[0])\n for i in range(1, len(items)):\n S += \" | \" + construct_right(right=items[i], prob=p[i])\n return S\n\ndef construct_grammar_trigonometric (probs1 = [0.8,0.2], probs2=[0.4,0.4,0.2], \n variables = [\"'x'\", \"'y'\"], p_vars = [0.5, 0.5],\n functions = [\"'sin'\", \"'cos'\", \"'tan'\"]):\n \n grammar = construct_production(left=\"S\", items=[\"T1\" + \"'('\" + \"V\" + \"')'\",\n \"T1\" + \" \" + \"T2\" + \"'('\" + \"V\" + \"')'\"], probs=probs1)\n grammar += construct_production(left=\"T1\", items=functions, probs=probs2)\n grammar += construct_production(left=\"T2\", items=[\"'h'\"], probs=[1])\n grammar += construct_production(left = \"V\", items=variables, probs=p_vars)\n return grammar\n \ndef construct_grammar_function (functions=[\"'sin'\", \"'cos'\"], probs=[0.5,0.5], string=True):\n grammar = construct_production(left=\"S\", items=[\"A'(''x'')'\"], probs=[1])\n grammar += construct_production(left=\"A\", items=functions, probs=probs)\n return grammar\n \ndef construct_grammar_polytrig (p_more_terms=[0.7,0.15,0.15], p_higher_terms=0.5, p_vars = [0.5,0.3,0.2], \n variables = [\"'x'\", \"'v'\", \"'a'\", \"'sin(C*x + C)'\"]):\n grammar = construct_production(left=\"S\", items=[\"'C' '+' S2\"], probs=[1])\n grammar += construct_production(left=\"S2\", items=[\"'C' '*' T '+' S2\", \"'C' '*' T\", \"'C'\"], probs=p_more_terms)\n grammar += construct_production(left=\"T\", items=[\"T '*' V\", \"V\"], probs=[p_higher_terms, 1-p_higher_terms])\n grammar += construct_production(left=\"V\", items=variables, probs=p_vars)\n return grammar\n\ndef construct_grammar_polynomial (p_S = [0.4, 0.6], p_T = [0.4, 0.6], p_vars = [1], p_R = [0.6, 0.4], p_F = [1],\n functions = [\"'exp'\"], variables = [\"'x'\"]):\n grammar = construct_production(left=\"S\", items=[\"S '+' R\", \"R\"], probs=p_S)\n grammar += construct_production(left=\"R\", items=[\"T\", \"'C' '*' F '(' T ')'\"], probs=p_R)\n grammar += construct_production(left=\"T\", items=[\"T '*' V\", \"'C'\"], probs=p_T)\n grammar += construct_production(left=\"F\", items=functions, probs=p_F)\n grammar += construct_production(left=\"V\", items=variables, probs=p_vars)\n return grammar\n\ndef construct_grammar_simplerational (p_S = [0.2, 0.8], p_P = [0.4, 0.3, 0.3], p_R = [0.4, 0.6], p_M = [0.4, 0.6], \n p_F = [1], p_vars = [1], functions = [\"'exp'\"], variables = [\"'x'\"]):\n grammar = construct_production(left=\"S\", items=[\"P '/' R\", \"P\"], probs=p_S)\n grammar += construct_production(left=\"P\", items=[\"P '+' 'C' '*' R\", \"'C' '*' R\", \"'C'\"], probs=p_P)\n grammar += construct_production(left=\"R\", items=[\"F '(' 
'C' '*' M ')'\", \"M\"], probs=p_R)\n grammar += construct_production(left=\"M\", items=[\"M '*' V\", \"V\"], probs=p_M)\n grammar += construct_production(left=\"F\", items=functions, probs=p_F)\n grammar += construct_production(left=\"V\", items=variables, probs=p_vars)\n return grammar\n\ndef construct_grammar_rational (p_S = [0.4, 0.6], p_T = [0.4, 0.6], p_vars = [1], p_R = [0.6, 0.4], p_F = [1],\n functions = [\"'exp'\"], variables = [\"'x'\"]):\n grammar = construct_production(left=\"S\", items=[\"'(' E ')' '/' '(' E ')'\"], probs=[1])\n grammar += construct_production(left=\"E\", items=[\"E '+' R\", \"R\"], probs=p_S)\n grammar += construct_production(left=\"R\", items=[\"T\", \"'C' '*' F '(' T ')'\"], probs=p_R)\n grammar += construct_production(left=\"T\", items=[\"T '*' V\", \"'C'\"], probs=p_T)\n grammar += construct_production(left=\"F\", items=functions, probs=p_F)\n grammar += construct_production(left=\"V\", items=variables, probs=p_vars)\n return grammar\n\ndef construct_grammar_universal (p_sum=[0.2, 0.2, 0.6], p_mul = [0.2, 0.2, 0.6], p_rec = [0.2, 0.4, 0.4], \n variables=[\"'x'\", \"'y'\"], p_vars=[0.5,0.5],\n functions=[\"sin\", \"cos\", \"sqrt\", \"exp\"], p_functs=[0.6, 0.1, 0.1, 0.1, 0.1]):\n #grammar = construct_production(left=\"S\", items=[\"E '+' 'C'\"], probs=[1])\n grammar = construct_production(left=\"S\", items=[\"S '+' F\", \"S '-' F\", \"F\"], probs=p_sum)\n grammar += construct_production(left=\"F\", items=[\"F '*' T\", \"F '/' T\", \"T\"], probs=p_mul)\n grammar += construct_production(left=\"T\", items=[\"R\", \"'C'\", \"V\"], probs=p_rec)\n grammar += construct_production(left=\"R\", items=[\"'(' S ')'\"] + [\"'\"+f+\"(' S ')'\" for f in functions], probs=p_functs)\n grammar += construct_production(left=\"V\", items=variables, probs=p_vars)\n return grammar\n\n\ndef unit_to_string (unit, unit_symbols=[\"m\", \"s\", \"kg\", \"T\", \"V\"]):\n return \"\".join([unit_symbols[i]+str(unit[i]) for i in range(len(unit))])\n\ndef string_to_unit (unit_string, unit_symbols=[\"m\", \"s\", \"kg\", \"T\", \"V\"]):\n u = []\n for i in range(len(unit_symbols)-1):\n split = unit_string.split(unit_symbols[i])[1].split(unit_symbols[i+1])\n u += [int(split[0])]\n u += [int(split[1])]\n return u\n\ndef units_dict (variables, units, dimensionless = [0,0,0,0,0], target_variable_unit = [0,0,0,0,0]):\n dictunits = {}\n for i in range(len(variables)):\n unit_string = unit_to_string(units[i])\n if unit_string in dictunits:\n dictunits[unit_string] += [variables[i]]\n else:\n dictunits[unit_string] = [variables[i]]\n if unit_to_string(dimensionless) not in dictunits:\n dictunits[unit_to_string(dimensionless)] = []\n #if unit_to_string(unit_to_string(units[target_variable_unit_index])) not in dictunits:\n # dictunits[unit_to_string(units[target_variable_unit_index])] = []\n if unit_to_string(target_variable_unit) not in dictunits:\n dictunits[unit_to_string(target_variable_unit)] = []\n \n for unit in units:\n if unit_to_string(unit) not in dictunits:\n dictunits[unit_to_string(unit)] = []\n return dictunits\n\ndef unit_conversions(units_dict, order=1, unit_symbols = [\"m\", \"s\", \"kg\", \"T\", \"V\"]):\n conversions = {}\n #units = np.array([np.fromstring(unit.strip(\"[\").strip(\"]\").strip(), sep=\",\", dtype=int) for unit in list(units_dict.keys())])\n units = np.array([string_to_unit(unit, unit_symbols = unit_symbols) for unit in list(units_dict.keys())])\n for i in range(len(units)):\n conversions_mul = []\n conversions_div = []\n for j in range(len(units)):\n for k in 
range(len(units)):\n if np.array_equal(units[i], units[j] + units[k]):\n if [j,k] not in conversions_mul and [k,j] not in conversions_mul:\n conversions_mul += [[j,k]]\n if np.array_equal(units[i], units[j] - units[k]):\n if [j,k] not in conversions_div:\n conversions_div += [[j,k]]\n if np.array_equal(units[i], units[k]- units[j]):\n if [k,j] not in conversions_div:\n conversions_div += [[k,j]]\n conversions[str(i)+\"*\"] = conversions_mul\n conversions[str(i)+\"/\"] = conversions_div\n return conversions, units\n\ndef probs_uniform(items, A=1):\n if len(items) > 0:\n return [A/len(items)]*len(items)\n else:\n return []\n \ndef extend_units(units):\n ext_units = list(units)\n for unit in units:\n for i in range(len(unit)):\n for j in range(abs(unit[i])):\n u = [0]*i + [int(unit[i]/abs(unit[i])*(abs(unit[i])-j))] + unit[min([i+1, len(unit)]):]\n if u not in ext_units:\n ext_units += [u]\n \n for i in range(len(units[0])):\n if np.sum(np.abs(units)[:,i]) > 0:\n u = [0]*i + [1] + [0]*(len(units[0]) - i - 1)\n if u not in ext_units:\n ext_units += [u]\n \n return ext_units\n\ndef clumsy_solve(A: sp.Matrix, b: sp.Matrix):\n \"\"\"Fixes a bug in diophantine.solve by expanding the system of equations to force multiple solutions.\"\"\"\n try: # First try to find 0 or infinite solutions.\n x = solve(A, b)\n return x\n except NotImplementedError:\n # Expand the system to get more than 1 solutions (infinite, \n # since nontrivial kernel). Then drop the last element of \n # the solution to get the solution of the original unexpanded \n # system.\n A_inf = sp.Matrix.hstack(A, sp.Matrix.zeros(A.shape[0], 1)) # Expand system.\n x = solve(A_inf, b) # infinite solutions so no error ...\n return [sp.Matrix(x[0][:-1])] # Drop the last element of the vector.\n\ndef extend_units_dio(units_list, target_variable_index):\n \"\"\"Extends the units to facilitate generation of expressions by grammar.\n \n A system of diophantine equations Ax=b is solved, where A is a matrix of provided units transposed,\n b is the the target variable unit and x is a vector of integer weights. x represents the largest \n multiple of a unit that needs to be included to be able to derive expressions. 
\n \"\"\"\n target_unit = units_list[target_variable_index]\n units = list(units_list)\n units.pop(target_variable_index)\n\n \"\"\"Define and solve the system of diophantine equations.\"\"\"\n A = sp.Matrix(np.vstack(units)).T\n b = sp.Matrix(target_unit)\n solutions = clumsy_solve(A, b)\n\n expanded_units = list(units)\n for solution in solutions:\n expanded_multipliers = []\n \"\"\"For each dimension, generate every integer multiplier up to the maxumal.\"\"\"\n for u in solution:\n expanded_multipliers += [[ui for ui in range(0, u+1)]]\n \"\"\"Generate the cartesian product of the integer multipliers between all dimensions.\"\"\"\n expanded_combinations = product(*expanded_multipliers)\n \"\"\"Obtain units by computing sums of weighted units.\"\"\"\n for comb in expanded_combinations:\n unit = list(np.dot(comb, units))\n if unit not in expanded_units:\n expanded_units += [unit]\n return expanded_units\n \ndef construct_grammar_universal_dim_direct (variables=[\"'U'\", \"'d'\", \"'k'\", \"'A'\"],\n p_recursion=[0.1, 0.9], # recurse vs terminate\n p_operations=[0.2, 0.3, 0.4, 0.1], # sum, sub, mul, div\n p_constant=[0.2, 0.8], # constant vs variable\n functions=[\"sin\", \"cos\", \"sqrt\", \"exp\"], p_functs=[0.6, 0.1, 0.1, 0.1, 0.1],\n units = [[2,-2,1,0,0], [1,0,0,0,0], [-1,0,0,0,0], [0,0,0,0,0], [2,-2,1,0,0]], \n target_variable_unit_index = -1,\n dimensionless = [0,0,0,0,0]):\n target_variable_unit = units[target_variable_unit_index]\n dictunits = units_dict(variables, units)\n conversions, unique_units = unit_conversions(dictunits)\n strunits = [unit_to_string(unit) for unit in unique_units]\n \n grammar = construct_production(left=\"S\", items=[unit_to_string(target_variable_unit)], probs=[1.0])\n for i in range(len(unique_units)):\n if strunits[i] == unit_to_string(dimensionless):\n grammar += construct_production(left=strunits[i], \n items=[\"F\"] + [\"'\"+f+\"(' F ')'\" for f in functions],\n probs=p_functs)\n left_item = \"F\"\n else:\n left_item = strunits[i]\n \n right_sum = [\"'('\" + strunits[i] + \"')'\" + \"'+'\" + \"'('\" + strunits[i] + \"')'\"]\n right_sub = [\"'('\" + strunits[i] + \"')'\" + \"'-'\" + \"'('\" + strunits[i] + \"')'\"]\n right_mul = [\"'('\" + strunits[conv[0]] + \"')'\" + \"'*'\" + \"'('\" + strunits[conv[1]] + \"')'\" for conv in conversions[str(i)+\"*\"]]\n right_div = [\"'('\" + strunits[conv[0]] + \"')'\" + \"'/'\" + \"'('\" + strunits[conv[1]] + \"')'\" for conv in conversions[str(i)+\"/\"]]\n right_var = dictunits[unit_to_string(unique_units[i])]\n right_const = [\"'C'\"]\n right_recur = right_sum + right_sub + right_mul + right_div \n right_terminal = right_const + right_var\n right = right_recur + right_terminal\n \n probs_mul = probs_uniform(right_mul, A=p_operations[2])\n probs_div = probs_uniform(right_div, A=p_operations[3])\n probs_recur = np.hstack([p_operations[:2], probs_mul, probs_div])\n probs_vars = probs_uniform(dictunits[strunits[i]], A=p_constant[1])\n probs_terminal = np.hstack([[p_constant[0]], probs_vars])\n probs = np.hstack([p_recursion[0]*probs_recur, p_recursion[1]*probs_terminal])\n\n #probs = [0.4/len(right_recur)]*len(right_recur) + [0.6/len(right_terminal)]*len(right_terminal)\n \n grammar += construct_production(left=left_item, \n items=right,\n probs = probs)\n\n return grammar\n\ndef construct_grammar_universal_dim (variables=[\"'U'\", \"'d'\", \"'k'\"],\n p_vars = [0.34, 0.33, 0.33],\n p_sum = [0.2, 0.2, 0.6],\n p_mul = [0.2, 0.2, 0.6],\n p_rec=[0.2, 0.4, 0.4], # recurse vs terminate\n functions=[\"sin\", \"cos\", 
\"sqrt\", \"exp\"], p_functs=[0.6, 0.1, 0.1, 0.1, 0.1],\n units = [[2,-2,1,0,0], [1,0,0,0,0], [-1,0,0,0,0], [2,-2,1,0,0]], \n target_variable_unit_index = -1,\n dimensionless = [0,0,0,0,0],\n extended_units = None):\n target_variable_unit = list(units[target_variable_unit_index])\n \n if isinstance(extended_units, list):\n units += extended_units\n elif isinstance(extended_units, str):\n if extended_units == \"heuristic\":\n units = extend_units(units)\n elif extended_units == \"diophantine\":\n units = extend_units_dio(units, target_variable_unit_index)\n print(units)\n else:\n raise ValueError(\"Dimensional grammar construction: choice of unit extension not recognized. Supported inputs: None, list of units, 'heuristic' and 'diophantine'\")\n \n dictunits = units_dict(variables, units, dimensionless = dimensionless, target_variable_unit = target_variable_unit)\n conversions, unique_units = unit_conversions(dictunits)\n strunits = [unit_to_string(unit) for unit in unique_units]\n \n grammar = construct_production(left=\"S\", items=[\"E_\" + unit_to_string(target_variable_unit)], probs=[1.0])\n for i in range(len(unique_units)): \n right_sum = [\"E_\" + strunits[i] + \"'+'\" + \"F_\" + strunits[i]]\n right_sub = [\"E_\" + strunits[i] + \"'-'\" + \"F_\" + strunits[i]]\n right_Fid = [\"F_\" + strunits[i]]\n grammar += construct_production(left=\"E_\" + strunits[i], \n items = right_sum + right_sub + right_Fid,\n probs = p_sum)\n \n right_mul = [\"F_\" + strunits[conv[0]] + \"'*'\" + \"T_\" + strunits[conv[1]] for conv in conversions[str(i)+\"*\"]]\n right_div = [\"F_\" + strunits[conv[0]] + \"'/'\" + \"T_\" + strunits[conv[1]] for conv in conversions[str(i)+\"/\"]]\n right_Tid = [\"T_\" + strunits[i]]\n probs_mul = probs_uniform(right_mul, A=p_mul[0])\n probs_div = probs_uniform(right_div, A=p_mul[1])\n grammar += construct_production(left=\"F_\" + strunits[i], \n items = right_mul + right_div + right_Tid,\n probs = probs_mul + probs_div + [p_mul[2]])\n \n if strunits[i] == unit_to_string(dimensionless):\n right_recur = [\"F\"]\n right_const = [\"'C'\"]\n else:\n right_recur = [\"'('\" + \"E_\" + strunits[i] + \"')'\"]\n #right_recur = [\"E_\" + strunits[i]]\n right_const = [\"C_\" + strunits[i]]\n right_var = dictunits[unit_to_string(unique_units[i])]\n probs_vars = probs_uniform(dictunits[strunits[i]], A=p_rec[1])\n probs_rec = [p_rec[0]] + probs_vars + [p_rec[2]]\n # if len(probs_vars) > 0:\n # right_const = [\"'C'\"]\n # else:\n # #probs_rec = [1.0]\n # right_const = [\"'1'\"]\n grammar += construct_production(left=\"T_\" + strunits[i], \n items = right_recur + right_var + right_const,\n probs = probs_rec)\n \n \n if strunits[i] == unit_to_string(dimensionless): \n right_F = [\"'('\" + \"E_\" + strunits[i] + \"')'\"] + [\"'\"+f+\"('\" + \"E_\"+strunits[i] + \"')'\" for f in functions]\n grammar += construct_production(left = \"F\", \n items=right_F,\n probs=p_functs)\n\n return grammar\n\nGRAMMAR_LIBRARY = {\n \"universal\": construct_grammar_universal,\n \"universal-dim\": construct_grammar_universal_dim,\n \"rational\": construct_grammar_rational,\n \"simplerational\": construct_grammar_simplerational,\n \"polytrig\": construct_grammar_polytrig,\n \"trigonometric\": construct_grammar_trigonometric,\n \"polynomial\": construct_grammar_polynomial}\n\n\n\nif __name__ == \"__main__\":\n print(\"--- grammar_construction.py test ---\")\n np.random.seed(0)\n from nltk import PCFG\n grammar = grammar_from_template(\"universal\", {\"variables\":[\"'phi'\", \"'theta'\", \"'r'\"], 
\"p_vars\":[0.2,0.4,0.4]})\n # Testing some grammar generation:\n grammar1 = grammar_from_template(\"trigonometric\", {})\n # Grammar template without variables argument (proudces error):\n # grammar2 = grammar_from_template(\"trigonometric\", {\"variables\":[\"'phi'\", \"'theta'\", \"'r'\"]})\n grammar3 = grammar_from_template(\"function\", {\"variables\":[\"'phi'\", \"'theta'\", \"'r'\"]})\n grammar4 = grammar_from_template(\"trigonometric\", {\"probs1\":[0.8,0.2], \"probs2\":[0.4,0.4,0.2] })\n grammar5 = grammar_from_template(\"function\", {\"functions\":[\"'sin'\", \"'cos'\"], \"probs\":[0.5,0.5]})\n for i, grammar_ in enumerate([grammar, grammar1, grammar3, grammar4, grammar5]):\n print(f\"grammar {i}: {grammar_}\")\n print(grammar)\n for i in range(5):\n print(grammar.generate_one())\n print(\"test\", construct_production(\"s\", [], []))\n" ]
[ [ "numpy.hstack", "numpy.dot", "numpy.abs", "numpy.random.seed", "numpy.array_equal", "numpy.array", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Liuzkai/PythonScript
[ "fb21ad80e085f6390ae970b81404f7e5c7923f4e" ]
[ "Quantitative/3_10Normalization.py" ]
[ "from pylab import *\nimport pandas as pd\nimport matplotlib.pyplot as plot\n\n\nfilePath = (\"/Users/liuzhongkai/Downloads/Python量化交易实战/code/dataTest.csv\")\ndataFile = pd.read_csv(filePath, header=None, prefix='V')\n\nsummary = dataFile.describe()\ndataFile_normalization = dataFile.iloc[:,1:6]\n\nfor i in range(5) :\n mean = summary.iloc[1,i+1]\n std = summary.iloc[2,i+1]\n dataFile_normalization.iloc[:,i:i+1] = (dataFile_normalization.iloc[:,i:i+1] - mean)/ std\n\na = dataFile_normalization.values\nb = dataFile.iloc[:,1:6].values\nboxplot(a)\nplot.xlabel(\"Attribute\")\nplot.ylabel(\"Score\")\nplot.show()" ]
[ [ "matplotlib.pyplot.xlabel", "pandas.read_csv", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
666zcli/nnan
[ "89de23092cf6b6bc8ca501920221b1f047665327" ]
[ "models/alexnet_nan.py" ]
[ "import torch.nn as nn\nimport torchvision.transforms as transforms\nimport nnan\n\nsnn = nnan.NNaNUnit(dims=[10,10,10])\n\n__all__ = ['alexnet_nan']\n\nclass AlexNetOWT_BN(nn.Module):\n\n def __init__(self, num_classes=1000):\n super(AlexNetOWT_BN, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2,\n bias=False),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.BatchNorm2d(64),\n #nn.ReLU(inplace=True),\n snn,\n nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=False),\n nn.MaxPool2d(kernel_size=3, stride=2),\n #nn.ReLU(inplace=True),\n snn,\n nn.BatchNorm2d(192),\n nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=False),\n #nn.ReLU(inplace=True),\n snn,\n nn.BatchNorm2d(384),\n nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=False),\n #nn.ReLU(inplace=True),\n snn,\n nn.BatchNorm2d(256),\n nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),\n nn.MaxPool2d(kernel_size=3, stride=2),\n #nn.ReLU(inplace=True),\n snn,\n nn.BatchNorm2d(256)\n )\n self.classifier = nn.Sequential(\n nn.Linear(256 * 6 * 6, 4096, bias=False),\n nn.BatchNorm1d(4096),\n #nn.ReLU(inplace=True),\n snn,\n nn.Dropout(0.5),\n nn.Linear(4096, 4096, bias=False),\n nn.BatchNorm1d(4096),\n #nn.ReLU(inplace=True),\n snn,\n nn.Dropout(0.5),\n nn.Linear(4096, num_classes)\n )\n\n #self.regime = {\n # 0: {'optimizer': 'SGD', 'lr': 1e-2,\n # 'weight_decay': 5e-4, 'momentum': 0.9},\n # 10: {'lr': 5e-3},\n # 15: {'lr': 1e-3, 'weight_decay': 0},\n # 20: {'lr': 5e-4},\n # 25: {'lr': 1e-4}\n #}\n self.regime = {\n 0: {'optimizer': 'SGD', 'lr': 1e-2,\n 'weight_decay': 5e-4, 'momentum': 0.9},\n 20: {'lr': 1e-3},\n 40: {'lr': 1e-4}\n }\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n self.input_transform = {\n 'train': transforms.Compose([\n transforms.Scale(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ]),\n 'eval': transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n ])\n }\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(-1, 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\n\ndef alexnet_nan(**kwargs):\n num_classes = getattr(kwargs, 'num_classes', 1000)\n return AlexNetOWT_BN(num_classes)\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
122689305/private-pgm
[ "557f0381cd8c31c6277ccc61ec66702a8bdfcacc" ]
[ "src/mbi/dataset.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\nimport json\nfrom mbi import Domain\n\nclass Dataset:\n def __init__(self, df, domain, weights=None):\n \"\"\" create a Dataset object\n\n :param df: a pandas dataframe\n :param domain: a domain object\n :param weight: weight for each row\n \"\"\"\n assert set(domain.attrs) <= set(df.columns), 'data must contain domain attributes'\n assert weights is None or df.shape[0] == weights.size\n self.domain = domain\n self.df = df.loc[:,domain.attrs]\n self.weights = weights\n\n @staticmethod\n def synthetic(domain, N):\n \"\"\" Generate synthetic data conforming to the given domain\n\n :param domain: The domain object \n :param N: the number of individuals\n \"\"\"\n arr = [np.random.randint(low=0, high=n, size=N) for n in domain.shape]\n values = np.array(arr).T\n df = pd.DataFrame(values, columns = domain.attrs)\n return Dataset(df, domain)\n\n @staticmethod\n def load(path, domain):\n \"\"\" Load data into a dataset object\n\n :param path: path to csv file\n :param domain: path to json file encoding the domain information\n \"\"\"\n df = pd.read_csv(path)\n config = json.load(open(domain))\n domain = Domain(config.keys(), config.values())\n return Dataset(df, domain)\n \n def project(self, cols):\n \"\"\" project dataset onto a subset of columns \"\"\"\n if type(cols) in [str, int]:\n cols = [cols]\n data = self.df.loc[:,cols]\n domain = self.domain.project(cols)\n return Dataset(data, domain, self.weights)\n\n def drop(self, cols):\n proj = [c for c in self.domain if c not in cols]\n return self.project(proj)\n \n @property\n def records(self):\n return self.df.shape[0]\n\n def datavector(self, flatten=True):\n \"\"\" return the database in vector-of-counts form \"\"\"\n bins = [range(n+1) for n in self.domain.shape]\n ans = np.histogramdd(self.df.values, bins, weights=self.weights)[0]\n return ans.flatten() if flatten else ans\n \n" ]
[ [ "pandas.read_csv", "numpy.histogramdd", "pandas.DataFrame", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
mr4jay/numerai
[ "2a09c648c66143ee101cd80de4827108aaf218fc" ]
[ "models/pairwise/model.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport tensorflow as tf\n\nclass Model(object):\n\n def __init__(self, features_L, features_R, targets, is_training):\n with tf.variable_scope('feature_extractor'):\n embedding_L = self.feature_extractor(features_L, is_training)\n\n with tf.variable_scope('feature_extractor', reuse=True):\n embedding_R = self.feature_extractor(features_R, is_training)\n\n embedding = tf.concat(1, [embedding_L, embedding_R])\n logits = self.classifier(embedding, is_training)\n self.predictions = tf.nn.softmax(logits)\n\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, targets)\n self.loss = tf.reduce_mean(cross_entropy, name='loss')\n tf.contrib.layers.summarize_tensor(self.loss)\n tf.contrib.losses.add_loss(self.loss)\n\n self.total_loss = tf.contrib.losses.get_total_loss(add_regularization_losses=True, name='total_loss')\n\n # setup learning\n if is_training:\n self.global_step = tf.contrib.framework.get_or_create_global_step()\n self.learning_rate = 1e-4\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.train_step = tf.contrib.layers.optimize_loss(self.total_loss, self.global_step, \\\n learning_rate=self.learning_rate,\n clip_gradients=None,\n gradient_noise_scale=None,\n optimizer=optimizer,\n moving_average_decay=None)\n\n def feature_extractor(self, features, is_training):\n relu_init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN')\n weights_reg = tf.contrib.layers.l2_regularizer(1e-3)\n\n normalizer_fn = tf.contrib.layers.batch_norm\n normalizer_params = { 'is_training': is_training }\n\n h0 = tf.contrib.layers.fully_connected(\n inputs=features,\n num_outputs=16,\n activation_fn=tf.nn.relu,\n weights_initializer=relu_init,\n weights_regularizer=weights_reg,\n normalizer_fn=normalizer_fn,\n normalizer_params=normalizer_params)\n\n h1 = tf.contrib.layers.fully_connected(\n inputs=h0,\n num_outputs=8,\n activation_fn=tf.nn.relu,\n weights_initializer=relu_init,\n weights_regularizer=weights_reg,\n normalizer_fn=normalizer_fn,\n normalizer_params=normalizer_params)\n\n return h1\n\n def classifier(self, features, is_training):\n relu_init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN')\n softmax_init = tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN')\n weights_reg = tf.contrib.layers.l2_regularizer(1e-3)\n\n normalizer_fn = tf.contrib.layers.batch_norm\n normalizer_params = { 'is_training': is_training }\n\n h1 = tf.contrib.layers.fully_connected(\n inputs=features,\n num_outputs=16,\n activation_fn=tf.nn.relu,\n weights_initializer=relu_init,\n weights_regularizer=weights_reg,\n normalizer_fn=normalizer_fn,\n normalizer_params=normalizer_params)\n\n h2 = tf.contrib.layers.fully_connected(\n inputs=h1,\n num_outputs=2,\n activation_fn=None,\n weights_initializer=softmax_init,\n weights_regularizer=weights_reg,\n normalizer_fn=normalizer_fn,\n normalizer_params=normalizer_params)\n\n return h2\n\n @property\n def num_parameters(self):\n return sum([np.prod(tvar.get_shape().as_list()) for tvar in tf.trainable_variables()])\n" ]
[ [ "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.reduce_mean", "tensorflow.trainable_variables", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.contrib.losses.add_loss", "tensorflow.contrib.layers.fully_connected", "tensorflow.contrib.layers.optimize_loss", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.losses.get_total_loss", "tensorflow.variable_scope", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.contrib.layers.summarize_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
pengkangzaia/usad
[ "937a29c24632cfa31e0c626cd5b058b3af74ef94" ]
[ "utils/utils.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\n\nfrom sklearn.metrics import roc_curve, roc_auc_score, f1_score, classification_report\n\n\ndef get_default_device():\n \"\"\"Pick GPU if available, else CPU\"\"\"\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')\n\n\ndef to_device(data, device):\n \"\"\"Move tensor(s) to chosen device\"\"\"\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)\n\n\ndef plot_history(history):\n losses1 = [x['val_loss1'] for x in history]\n losses2 = [x['val_loss2'] for x in history]\n plt.plot(losses1, '-x', label=\"loss1\")\n plt.plot(losses2, '-x', label=\"loss2\")\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.legend()\n plt.title('Losses vs. No. of epochs')\n plt.grid()\n plt.show()\n\n\ndef plot_simple_history(history):\n losses = [x['val_loss'] for x in history]\n plt.plot(losses, '-x', label=\"loss\")\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.legend()\n plt.title('Losses vs. No. of epochs')\n plt.grid()\n plt.show()\n\n\ndef plot_train_loss(history):\n plt.plot(history, '-x', label=\"loss\")\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.legend()\n plt.title('train Losses vs. No. of epochs')\n plt.grid()\n plt.show()\n\n\ndef histogram(y_test, y_pred):\n plt.figure(figsize=(12, 6))\n plt.hist([y_pred[y_test == 0],\n y_pred[y_test == 1]],\n bins=20,\n color=['#82E0AA', '#EC7063'], stacked=True)\n plt.title(\"Results\", size=20)\n plt.grid()\n plt.show()\n\n\ndef ROC(y_test, y_pred):\n fpr, tpr, tr = roc_curve(y_test, y_pred)\n auc = roc_auc_score(y_test, y_pred)\n idx = np.argwhere(np.diff(np.sign(tpr - (1 - fpr)))).flatten()\n\n plt.xlabel(\"FPR\")\n plt.ylabel(\"TPR\")\n plt.plot(fpr, tpr, label=\"AUC=\" + str(auc))\n plt.plot(fpr, 1 - fpr, 'r:')\n plt.plot(fpr[idx], tpr[idx], 'ro')\n plt.legend(loc=4)\n plt.grid()\n plt.show()\n return tr[idx]\n\n\ndef confusion_matrix(target, predicted, perc=False):\n data = {'y_Actual': target,\n 'y_Predicted': predicted\n }\n df = pd.DataFrame(data, columns=['y_Predicted', 'y_Actual'])\n confusion_matrix = pd.crosstab(df['y_Predicted'], df['y_Actual'], rownames=['Predicted'], colnames=['Actual'])\n\n if perc:\n sns.heatmap(confusion_matrix / np.sum(confusion_matrix), annot=True, fmt='.2%', cmap='Blues')\n else:\n sns.heatmap(confusion_matrix, annot=True, fmt='d')\n plt.show()\n\n\ndef max_f1_score(y_test, y_pred):\n res = 0\n res_threshold = 0\n step_times = 1000\n for threshold in range(0, step_times, 1):\n th = threshold / step_times * 100\n y_clone = np.zeros(y_pred.shape[0])\n y_clone[y_pred >= th] = 1\n score = f1_score(y_test, y_clone, average='binary')\n if score > res:\n res = score\n res_threshold = th\n print(classification_report(y_test, y_clone))\n return res, res_threshold\n\n# def down_sample(data, sample_rate):\n#\n" ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.roc_auc_score", "pandas.DataFrame", "matplotlib.pyplot.plot", "torch.cuda.is_available", "torch.device", "sklearn.metrics.f1_score", "sklearn.metrics.classification_report", "pandas.crosstab", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.metrics.roc_curve", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "numpy.sign", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
JuanFuriaz/donkey_share
[ "caad831ca21094f05f9084f881ca3bbfa4168e4c" ]
[ "heatmap.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib import animation\n\ntry:\n from tensorflow.keras.layers import Input\n from tensorflow.keras.models import Model, load_model\n from tensorflow.keras.layers import Convolution2D\n from keras import backend as K\n plt.rcParams['animation.ffmpeg_path'] = '/home/jm/bin/ffmpeg' # explicit path for finding ffmpeg in my computer\nexcept ImportError:\n from tensorflow.python.keras.layers import Input\n from tensorflow.python.keras.models import Model, load_model\n from tensorflow.python.keras.layers import Convolution2D\n from tensorflow.python.keras import backend as K\n\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # disabling TF warnings\nimport tensorflow as tf\nimport cv2\nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom glob import glob\n#from keras import backend as K\nimport argparse\n#plt.rcParams['animation.ffmpeg_path'] = '/home/jm/bin/ffmpeg' # explicit path for finding ffmpeg in my computer\n\n\ndef compute_visualisation_mask(img, functor, layers_kernels, layers_strides):\n activations = functor([np.array([img])])\n upscaled_activation = np.ones((3, 6))\n for layer in [4, 3, 2, 1, 0]:\n averaged_activation = np.mean(activations[layer], axis=3).squeeze(axis=0) * upscaled_activation\n if layer > 0:\n output_shape = (activations[layer - 1].shape[1], activations[layer - 1].shape[2])\n else:\n output_shape = (120, 160)\n x = tf.constant(\n np.reshape(averaged_activation, (1,averaged_activation.shape[0],averaged_activation.shape[1],1)),\n tf.float32\n )\n conv = tf.nn.conv2d_transpose(\n x, layers_kernels[layer],\n output_shape=(1,output_shape[0],output_shape[1], 1),\n strides=layers_strides[layer],\n padding='VALID'\n )\n with tf.Session() as session:\n result = session.run(conv)\n upscaled_activation = np.reshape(result, output_shape)\n final_visualisation_mask = upscaled_activation\n return (final_visualisation_mask - np.min(final_visualisation_mask))/(np.max(final_visualisation_mask) - np.min(final_visualisation_mask))\n\n\ndef save_movie_mp4(image_array, video_name = \"example.mp4\"):\n writer = animation.FFMpegFileWriter(fps=20, metadata=dict(artist='Me'), bitrate=1800)\n dpi = 72.0\n xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]\n fig = plt.figure(figsize=(ypixels/dpi, xpixels/dpi), dpi=dpi)\n im = plt.figimage(image_array[0])\n\n\n def animate(i):\n im.set_array(image_array[i])\n return (im,)\n\n plt.show()\n ani = animation.FuncAnimation(fig, animate, frames=len(image_array))\n ani.save(video_name, writer=writer)\n\n\ndef get_video_array(video_limit=500, data_path = 'my/path/to/imgs/*.jpg', functor= None, layers_kernels = None, layers_strides = None):\n\n def numericalSort(value):\n parts = value.split(\"/\")[-1]\n parts = int(parts.split(\"_\")[0])\n return parts\n\n imgs = []\n alpha = 0.004\n beta = 1.0 - alpha\n counter = 0\n for path in sorted(glob(data_path), key=numericalSort):\n img = cv2.imread(path)\n img = img[:, :, ::-1]\n salient_mask = compute_visualisation_mask(img, functor, layers_kernels, layers_strides)\n salient_mask_stacked = np.dstack((salient_mask,salient_mask))\n salient_mask_stacked = np.dstack((salient_mask_stacked,salient_mask))\n blend = cv2.addWeighted(img.astype('float32'), alpha, salient_mask_stacked, beta, 0.0)\n imgs.append(blend)\n counter += 1\n if video_limit is not None:\n if counter >= video_limit:\n return imgs\n return imgs\n\n\ndef get_keras_functor(model_path=\"my/path/to/model.h5\"):\n \"\"\"\n Create CNN-model 
structure for Heatmap\n \"\"\"\n custom_objects = {\"GlorotUniform\": tf.keras.initializers.glorot_uniform}\n model = load_model(model_path, custom_objects)\n\n img_in = Input(shape=(120, 160, 3), name='img_in')\n x = img_in\n x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu', name='conv2d_1')(x)\n x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu', name='conv2d_2')(x)\n x = Convolution2D(64, (5, 5), strides=(2, 2), activation='relu', name='conv2d_3')(x)\n x = Convolution2D(64, (3, 3), strides=(2, 2), activation='relu', name='conv2d_4')(x)\n conv_5 = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu', name='conv2d_5')(x)\n convolution_part = Model(inputs=[img_in], outputs=[conv_5])\n\n for layer_num in ('1', '2', '3', '4', '5'):\n convolution_part.get_layer('conv2d_' + layer_num).set_weights(\n model.get_layer('conv2d_' + layer_num).get_weights())\n inp = convolution_part.input # input placeholder\n outputs = [layer.output for layer in convolution_part.layers][1:] # all layer outputs\n functor = K.function([inp], outputs)\n return functor\n\n\ndef main(video_limit = 100, data_path = 'my/path/to/imgs/*.jpg', model_path=\"my/path/to/model.h5\", video_name = \"example.mp4\"):\n functor = get_keras_functor(model_path= model_path)\n kernel_3x3 = tf.constant(np.array([\n [[[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]]]\n ]), tf.float32)\n kernel_5x5 = tf.constant(np.array([\n [[[1]], [[1]], [[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]], [[1]], [[1]]],\n [[[1]], [[1]], [[1]], [[1]], [[1]]]\n ]), tf.float32)\n layers_kernels = {4: kernel_3x3, 3: kernel_3x3, 2: kernel_5x5, 1: kernel_5x5, 0: kernel_5x5}\n layers_strides = {4: [1, 1, 1, 1], 3: [1, 2, 2, 1], 2: [1, 2, 2, 1], 1: [1, 2, 2, 1], 0: [1, 2, 2, 1]}\n imgs = get_video_array(video_limit= video_limit, data_path = data_path, functor= functor, layers_kernels = layers_kernels, layers_strides = layers_strides)\n save_movie_mp4(imgs, video_name)\n\n\nif __name__ == '__main__':\n \"\"\"\n Example use\n python3 heatmap.py -d \"mycar/data/tub_4_19-12-22/*.jpg\" -m \"mycar/models/mod_lin_1.h5\" -v \"lin_mod_19-12-22-tub4_500.mp4\" -c 500\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--data-path', help='Images dir', default= 'my/path/to/imgs/*.jpg', type=str)\n parser.add_argument('-m', '--model-path', help='Path to a model',\n default='my/path/to/model.h5', type=str)\n parser.add_argument('-v', '--video-name', help='Video Name',\n default='example.mp4', type=str)\n parser.add_argument('-c', '--number-images', help='number of images for creating video', default=100,\n type=int)\n args = parser.parse_args()\n #Without paser use this:\n #main(200, \"mycar/data/tub_4_19-12-22/*.jpg\" ,\"mycar/models/mod_lin_aug_1.h5\", \"lin_mod_aug_tub1_200.mp4\" )\n main(args.number_images, args.data_path, args.model_path, args.video_name)\n" ]
[ [ "numpy.min", "numpy.reshape", "matplotlib.pyplot.figimage", "numpy.dstack", "tensorflow.python.keras.models.load_model", "numpy.ones", "tensorflow.nn.conv2d_transpose", "tensorflow.python.keras.backend.function", "tensorflow.python.keras.models.Model", "numpy.max", "numpy.mean", "tensorflow.python.keras.layers.Convolution2D", "tensorflow.Session", "numpy.array", "matplotlib.pyplot.show", "tensorflow.python.keras.layers.Input", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.4" ] } ]
ktiwary2/nerf_pl
[ "99d40cba3a2d9a11d6988cb1a74cf29035a1ab5e" ]
[ "train_shadows.py" ]
[ "import os, sys\n\nimport imageio\nfrom opt import get_opts\nimport torch\nfrom collections import defaultdict\n\nfrom torch.utils.data import DataLoader\nfrom datasets import dataset_dict\n\n# models\nfrom models.nerf import Embedding, NeRF\nfrom models.rendering import render_rays\n\n# optimizer, scheduler, visualization\nfrom utils import *\n\n# losses\nfrom losses import loss_dict\n\n# metrics\nfrom metrics import *\n\n# pytorch-lightning\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.logging import TestTubeLogger\n\nto8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)\n\nclass NeRFSystem(LightningModule):\n def __init__(self, hparams):\n super(NeRFSystem, self).__init__()\n self.hparams = hparams\n\n self.loss = loss_dict[hparams.loss_type]()\n\n self.embedding_xyz = Embedding(3, 10) # 10 is the default number\n self.embedding_dir = Embedding(3, 4) # 4 is the default number\n self.embeddings = [self.embedding_xyz, self.embedding_dir]\n\n self.nerf_coarse = NeRF()\n self.models = [self.nerf_coarse]\n if hparams.N_importance > 0:\n self.nerf_fine = NeRF()\n self.models += [self.nerf_fine]\n\n def decode_batch(self, batch):\n if self.hparams.dataset_name == 'pyredner':\n all_rgb_gt = batch[\"rgb\"] # (num_images, H*W, 3)\n all_rgb_gt = all_rgb_gt.reshape(-1, 3)\n\n cam_all_rays = batch[\"cam_ray_bundle\"] # (num_images, H*W, 8)\n cam_all_rays = cam_all_rays.reshape(-1, 8)\n # light_all_rays = batch[\"light_ray_bundle\"] # (num_images, H*W, 8)?\n # light_all_rays = light_all_rays.reshape(-1, 8)\n\n # shadow_maps = batch[\"shadow_maps\"]\n # shadow_maps = shadow_maps.reshape(-1, 3)\n # shadow_maps = None\n\n return all_rgb_gt, cam_all_rays, None, None\n else:\n rays = batch['rays'] # (B, 8)\n # print(\"rays.shape\",rays.shape)\n rgbs = batch['rgbs'] # (B, 3)\n # print(\"rgbs.shape\",rgbs.shape)\n # print(\"decode batch\", rays.shape, rgbs.shape)\n return rays, rgbs\n\n def forward(self, rays):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays.shape[0]\n results = defaultdict(list)\n for i in range(0, B, self.hparams.chunk):\n rendered_ray_chunks = \\\n render_rays(self.models,\n self.embeddings,\n rays[i:i+self.hparams.chunk],\n self.hparams.N_samples,\n self.hparams.use_disp,\n self.hparams.perturb,\n self.hparams.noise_std,\n self.hparams.N_importance,\n self.hparams.chunk, # chunk size is effective in val mode\n self.train_dataset.white_back)\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def prepare_data(self):\n dataset = dataset_dict[self.hparams.dataset_name]\n kwargs = {'root_dir': self.hparams.root_dir,\n 'img_wh': tuple(self.hparams.img_wh), \n 'hparams': self.hparams\n }\n if self.hparams.dataset_name == 'llff':\n kwargs['spheric_poses'] = self.hparams.spheric_poses\n kwargs['val_num'] = self.hparams.num_gpus\n self.train_dataset = dataset(split='train', **kwargs)\n self.val_dataset = dataset(split='val', **kwargs)\n\n def configure_optimizers(self):\n self.optimizer = get_optimizer(self.hparams, self.models)\n scheduler = get_scheduler(self.hparams, self.optimizer)\n \n return [self.optimizer], [scheduler]\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset,\n shuffle=True,\n num_workers=4,\n batch_size=self.hparams.batch_size,\n pin_memory=True)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset,\n shuffle=False,\n num_workers=4,\n 
batch_size=1, # validate one image (H*W rays) at a time\n pin_memory=True)\n \n def training_step(self, batch, batch_nb):\n log = {'lr': get_learning_rate(self.optimizer)}\n if self.hparams.dataset_name == 'pyredner':\n rgbs, cam_all_rays, _, _ = self.decode_batch(batch)\n results = self(cam_all_rays)\n else: \n rays, rgbs = self.decode_batch(batch)\n results = self(rays)\n\n log['train/loss'] = loss = self.loss(results, rgbs)\n typ = 'fine' if 'rgb_fine' in results else 'coarse'\n\n with torch.no_grad():\n psnr_ = psnr(results[f'rgb_{typ}'], rgbs)\n log['train/psnr'] = psnr_\n\n return {'loss': loss,\n 'progress_bar': {'train_psnr': psnr_},\n 'log': log\n }\n\n def validation_step(self, batch, batch_nb):\n print(\"---------------Starting Validation---------------\")\n if self.hparams.dataset_name == 'pyredner':\n rgbs, cam_all_rays, _, _ = self.decode_batch(batch)\n rays = cam_all_rays.squeeze() # (H*W, 3)\n rgbs = rgbs.squeeze() # (H*W, 3)\n results = self(cam_all_rays)\n else: \n rays, rgbs = self.decode_batch(batch)\n rays = rays.squeeze() # (H*W,3)\n rgbs = rgbs.squeeze() # (H*W,3)\n results = self(rays)\n\n log = {'val_loss': self.loss(results, rgbs)}\n typ = 'fine' if 'rgb_fine' in results else 'coarse'\n \n if batch_nb == 0:\n print(\"---------------Evaluating and saving Images!---------------\")\n W, H = self.hparams.img_wh\n img = results[f'rgb_{typ}'].view(H, W, 3).cpu()\n rgb8 = to8b(img.numpy())\n gt8 = to8b(rgbs.view(H, W, 3).cpu().numpy())\n img = img.permute(2, 0, 1) # (3, H, W)\n img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)\n depth8 = visualize_depth(results[f'depth_{typ}'].view(H, W), to_tensor=False) \n depth = visualize_depth(results[f'depth_{typ}'].view(H, W)) # (3, H, W)\n if not os.path.exists(f'logs/{self.hparams.exp_name}/imgs'):\n os.mkdir(f'logs/{self.hparams.exp_name}/imgs')\n filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'gt_{:03d}.png'.format(self.current_epoch))\n imageio.imwrite(filename, gt8)\n filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'rgb_{:03d}.png'.format(self.current_epoch))\n imageio.imwrite(filename, rgb8)\n filename = os.path.join(f'logs/{self.hparams.exp_name}/imgs', 'depth_{:03d}.png'.format(self.current_epoch))\n imageio.imwrite(filename, depth8)\n\n stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)\n self.logger.experiment.add_images('val/GT_pred_depth',\n stack, self.global_step)\n\n log['val_psnr'] = psnr(results[f'rgb_{typ}'], rgbs)\n return log\n\n def validation_epoch_end(self, outputs):\n mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()\n\n return {'progress_bar': {'val_loss': mean_loss,\n 'val_psnr': mean_psnr},\n 'log': {'val/loss': mean_loss,\n 'val/psnr': mean_psnr}\n }\n\n\nif __name__ == '__main__':\n hparams = get_opts()\n system = NeRFSystem(hparams)\n checkpoint_callback = ModelCheckpoint(filepath=os.path.join(f'ckpts/{hparams.exp_name}',\n '{epoch:d}'),\n monitor='val/loss',\n mode='min',\n save_top_k=5,)\n\n logger = TestTubeLogger(\n save_dir=\"logs\",\n name=hparams.exp_name,\n debug=False,\n create_git_tag=False\n )\n\n trainer = Trainer(max_epochs=hparams.num_epochs,\n checkpoint_callback=checkpoint_callback,\n resume_from_checkpoint=hparams.ckpt_path,\n logger=logger,\n early_stop_callback=None,\n weights_summary=None,\n progress_bar_refresh_rate=1,\n gpus=hparams.num_gpus,\n distributed_backend='ddp' if len(hparams.num_gpus)>1 else None,\n 
num_sanity_val_steps=hparams.num_sanity_val_steps,\n benchmark=True,\n profiler=hparams.num_gpus==1, \n auto_scale_batch_size=True)\n\n trainer.fit(system)" ]
[ [ "torch.stack", "torch.no_grad", "torch.utils.data.DataLoader", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
madrugado/RecVAE
[ "8b9b2ded3f215f9e30b45a9cc61199b67fc3da42" ]
[ "utils.py" ]
[ "# based on https://github.com/dawenl/vae_cf\n\nimport numpy as np\nfrom scipy import sparse\nimport pandas as pd\nimport os\nimport bottleneck as bn\n\n\n\n\ndef load_train_data(csv_file, n_items, n_users, global_indexing=False):\n tp = pd.read_csv(csv_file)\n \n n_users = n_users if global_indexing else tp['uid'].max() + 1\n\n rows, cols = tp['uid'], tp['sid']\n data = sparse.csr_matrix((np.ones_like(rows),\n (rows, cols)), dtype='float64',\n shape=(n_users, n_items))\n return data\n\n\ndef load_tr_te_data(csv_file_tr, csv_file_te, n_items, n_users, global_indexing=False):\n tp_tr = pd.read_csv(csv_file_tr)\n tp_te = pd.read_csv(csv_file_te)\n\n if global_indexing:\n start_idx = 0\n end_idx = len(unique_uid) - 1\n else:\n start_idx = min(tp_tr['uid'].min(), tp_te['uid'].min())\n end_idx = max(tp_tr['uid'].max(), tp_te['uid'].max())\n\n rows_tr, cols_tr = tp_tr['uid'] - start_idx, tp_tr['sid']\n rows_te, cols_te = tp_te['uid'] - start_idx, tp_te['sid']\n\n data_tr = sparse.csr_matrix((np.ones_like(rows_tr),\n (rows_tr, cols_tr)), dtype='float64', shape=(end_idx - start_idx + 1, n_items))\n data_te = sparse.csr_matrix((np.ones_like(rows_te),\n (rows_te, cols_te)), dtype='float64', shape=(end_idx - start_idx + 1, n_items))\n return data_tr, data_te\n\n\ndef get_data(dataset, global_indexing=False):\n unique_sid = list()\n with open(os.path.join(dataset, 'unique_sid.txt'), 'r') as f:\n for line in f:\n unique_sid.append(line.strip())\n \n unique_uid = list()\n with open(os.path.join(dataset, 'unique_uid.txt'), 'r') as f:\n for line in f:\n unique_uid.append(line.strip())\n \n n_items = len(unique_sid)\n n_users = len(unique_uid)\n \n train_data = load_train_data(os.path.join(dataset, 'train.csv'), n_items, n_users, global_indexing=global_indexing)\n\n\n vad_data_tr, vad_data_te = load_tr_te_data(os.path.join(dataset, 'validation_tr.csv'),\n os.path.join(dataset, 'validation_te.csv'),\n n_items, n_users, \n global_indexing=global_indexing)\n\n test_data_tr, test_data_te = load_tr_te_data(os.path.join(dataset, 'test_tr.csv'),\n os.path.join(dataset, 'test_te.csv'),\n n_items, n_users, \n global_indexing=global_indexing)\n \n data = train_data, vad_data_tr, vad_data_te, test_data_tr, test_data_te\n data = (x.astype('float32') for x in data)\n \n return data\n\n\ndef ndcg(X_pred, heldout_batch, k=100):\n '''\n normalized discounted cumulative gain@k for binary relevance\n ASSUMPTIONS: all the 0's in heldout_data indicate 0 relevance\n '''\n batch_users = X_pred.shape[0]\n idx_topk_part = bn.argpartition(-X_pred, k, axis=1)\n topk_part = X_pred[np.arange(batch_users)[:, np.newaxis],\n idx_topk_part[:, :k]]\n idx_part = np.argsort(-topk_part, axis=1)\n # X_pred[np.arange(batch_users)[:, np.newaxis], idx_topk] is the sorted\n # topk predicted score\n idx_topk = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]\n # build the discount template\n tp = 1. 
/ np.log2(np.arange(2, k + 2))\n\n DCG = (heldout_batch[np.arange(batch_users)[:, np.newaxis],\n idx_topk].toarray() * tp).sum(axis=1)\n IDCG = np.array([(tp[:min(n, k)]).sum()\n for n in heldout_batch.getnnz(axis=1)])\n return DCG / IDCG\n\n\ndef recall(X_pred, heldout_batch, k=100):\n batch_users = X_pred.shape[0]\n\n idx = bn.argpartition(-X_pred, k, axis=1)\n X_pred_binary = np.zeros_like(X_pred, dtype=bool)\n X_pred_binary[np.arange(batch_users)[:, np.newaxis], idx[:, :k]] = True\n\n X_true_binary = (heldout_batch > 0).toarray()\n tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(\n np.float32)\n recall = tmp / np.minimum(k, X_true_binary.sum(axis=1))\n return recall" ]
[ [ "pandas.read_csv", "numpy.ones_like", "numpy.arange", "numpy.zeros_like", "numpy.argsort", "numpy.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
two-7182/synchrony
[ "aa15fa8b0229d257e15785b4fb70c1727c13b8c8" ]
[ "src/measure.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nMeeasure class for the research project 'Cortical Spike Synchrony as \na Measure of Contour Uniformity', as part of the RTG computational cognition, \nOsnabrueck University, Germany.\n\"\"\"\n\n__author__ = 'Julius Mayer, Viktoria Zemliak, Flora Perizonius'\n__email__ = '[email protected]'\n__date__ = '01.04.2022'\n__copyright__ = '(C) 2022 Julius Mayer, Viktoria Zemliak, Flora Perizonius'\n__license__ = 'MIT License'\n\nimport itertools\nimport numpy as np\nfrom elephant.spike_train_dissimilarity import van_rossum_distance, victor_purpura_distance\nfrom elephant.spike_train_synchrony import spike_contrast\nfrom neo.core import SpikeTrain\n\nclass Measure:\n def __init__(self, firings, metric='van_rossum'):\n '''\n Function for calculating dissimilarity between multiple spike trains.\n Args:\n metric = metric name. Available metrics: van_rossum, victor_purpura.\n firings = list of sequences of the neuron firings.\n '''\n metrics_available = ('van_rossum', 'victor_purpura', 'spike_contrast', 'rsync')\n if metric not in metrics_available:\n raise Exception('Please select from the available metrics: van_rossum, victor_purpura, spike_contrast, rsync')\n self.metric = metric\n \n if len(firings) < 2:\n raise Exception('Please select 2 or more spike trains to compare')\n if len(set([len(f) for f in firings])) > 1:\n raise Exception('Please select spike trains of the similar length')\n \n self.firings = firings\n #print(len(self.firings), 'spike trains to compare')\n self.length = len(firings[0])\n \n def _transform_firing(self, spike_train):\n return SpikeTrain(list(np.nonzero(spike_train))[0], units='ms', t_stop=self.length)\n \n def _pairwise_distance(self, firing1, firing2):\n train1 = self._transform_firing(firing1)\n train2 = self._transform_firing(firing2)\n\n if self.metric == 'van_rossum':\n return van_rossum_distance((train1, train2))[0,1]\n return victor_purpura_distance((train1, train2))[0,1]\n \n def dissimilarity(self):\n '''\n Measure the distance between arbitrary amount of neurons.\n '''\n if self.metric == 'spike_contrast':\n trains = [self._transform_firing(firing) for firing in self.firings]\n return 1 - spike_contrast(trains)\n \n elif self.metric == 'rsync':\n if isinstance(self.firings, list):\n firings = np.zeros((len(self.firings), len(self.firings[0])))\n for i,f in enumerate(self.firings):\n firings[i] = f\n self.firings = firings\n \n meanfield = np.mean(self.firings, axis=0) # spatial mean across cells, at each time\n variances = np.var(self.firings, axis=1) # variance over time of each cell\n return 1 - np.var(meanfield) / np.mean(variances)\n \n else:\n pairs = list(itertools.combinations(range(len(self.firings)), 2))\n distances = [self._pairwise_distance(self.firings[pair[0]], self.firings[pair[1]]) for pair in pairs]\n return {'median': np.median(distances), 'mean': np.mean(distances), 'max': np.max(distances), 'min': np.min(distances)}" ]
[ [ "numpy.nonzero", "numpy.min", "numpy.median", "numpy.max", "numpy.mean", "numpy.var" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeguerra/nonlinearMtnWavesSolver
[ "e2fe83d1f7c3c57cbe9ba0299a1b9179cf4b5869", "e2fe83d1f7c3c57cbe9ba0299a1b9179cf4b5869" ]
[ "computeAdjust4CBC.py", "computePartialDerivativesXZ.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 23 10:49:24 2019\n\n@author: -\n\"\"\"\n\nimport numpy as np\n\ndef computeAdjust4CBC(DIMS, numVar, varDex, bcType):\n # Get DIMS data\n NX = DIMS[3]\n NZ = DIMS[4]\n OPS = (NX+1) * NZ\n \n # All DOF per variable\n rowsAll = set(np.array(range(0,OPS)))\n \n # Get prognostic ordering\n #iU = varDex[0]\n iW = varDex[1]\n iP = varDex[2]\n iT = varDex[3]\n \n # Compute BC index vectors for U and W (coupled top and bottom BC)\n # including corners\n ubdex = np.array(range(0, OPS, NZ))\n utdex = np.array(range(NZ-1, OPS, NZ))\n \n # including corners\n uldex1 = np.array(range(ubdex[0], NZ))\n urdex1 = np.array(range(ubdex[-1], OPS))\n # excluding corners at terrain boundary\n uldex2 = np.array(range(ubdex[0]+1, NZ))\n urdex2 = np.array(range(ubdex[-1]+1, OPS))\n \n # Index all boundary DOF that can be diffused on\n latDex = np.unique(np.concatenate((uldex2,urdex2)))\n #vrtDex = np.unique(np.concatenate((ubdex,utdex)))\n extDex = np.unique(np.concatenate((urdex2,uldex2,ubdex,utdex)))\n diffDex = (latDex, ubdex, utdex, extDex)\n \n # BC indices for static solution (per variable)\n if bcType == 1:\n # Inflow condition on UWPT STATIC SOLUTION\n rowsOutU = set(uldex2)\n rowsOutW = set(np.concatenate((uldex2,utdex)))\n rowsOutP = set(uldex2)\n rowsOutT = set(uldex2)\n # Indexing for static solver\n left = np.concatenate((uldex2, uldex2 + iW*OPS, uldex2 + iP*OPS, uldex2 + iT*OPS))\n top = utdex + iW*OPS\n rowsOutBC_static = set(np.concatenate((left, top)))\n elif bcType == 2:\n # Inflow condition on UWPT TRANSIENT SOLUTION\n rowsOutU = set(uldex1)\n rowsOutW = set(np.concatenate((uldex1,utdex)))\n rowsOutP = set(uldex1)\n rowsOutT = set(uldex1)\n #rowsOutP = set(np.concatenate((uldex1,urdex1)))\n #rowsOutT = set(np.concatenate((uldex1,urdex1)))\n # Indexing for static solver\n left = np.concatenate((uldex1, uldex1 + iW*OPS, uldex1 + iP*OPS, uldex1 + iT*OPS))\n #right = np.concatenate((urdex1, urdex1 + iW*OPS, urdex1 + iP*OPS, urdex1 + iT*OPS))\n top = utdex + iW*OPS\n rowsOutBC_static = set(np.concatenate((left, top)))\n \n # Indexing arrays for static solution\n ubcDex = rowsAll.difference(rowsOutU); ubcDex = sorted(ubcDex)\n wbcDex = rowsAll.difference(rowsOutW); wbcDex = sorted(wbcDex)\n pbcDex = rowsAll.difference(rowsOutP); pbcDex = sorted(pbcDex)\n tbcDex = rowsAll.difference(rowsOutT); tbcDex = sorted(tbcDex)\n \n # W is treated as an essential BC at terrain in solution by direct substitution\n rowsOutBC_transient = (sorted(rowsOutU), sorted(rowsOutW), \\\n sorted(rowsOutP), sorted(rowsOutT))\n \n # All DOF for all variables\n rowsAll = set(np.array(range(0,numVar*OPS)))\n # Compute set difference from all rows to rows to be taken out LINEAR\n sysDex = rowsAll.difference(rowsOutBC_static)\n sysDex = sorted(sysDex)\n \n return uldex1, urdex1, ubdex, utdex, ubdex + iW*OPS, ubcDex, wbcDex, pbcDex, tbcDex, rowsOutBC_transient, sysDex, diffDex", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 2 17:10:18 2019\n\n@author: TempestGuerra\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sps\n\ndef computePartialDerivativesXZ(DIMS, REFS, DDX_1D, DDZ_1D):\n # Get the dimensions\n NX = DIMS[3] + 1\n NZ = DIMS[4]\n OPS = NX * NZ\n \n # Get REFS data\n sigma = REFS[7]\n \n # Unwrap the 1D derivative matrices into 2D operators\n \n #%% Vertical derivative and diffusion operators\n DDZ_OP = np.empty((OPS,OPS))\n for cc in range(NX):\n # Advanced slicing used to get submatrix\n ddex = 
np.array(range(NZ)) + cc * NZ\n # TF adjustment for vertical coordinate transformation\n SIGMA = sps.diags(sigma[:,cc])\n DDZ_OP[np.ix_(ddex,ddex)] = SIGMA.dot(DDZ_1D)\n \n # Make the operators sparse\n DDZM = sps.csr_matrix(DDZ_OP); del(DDZ_OP)\n \n #%% Horizontal Derivative\n DDX_OP = np.empty((OPS,OPS))\n for rr in range(NZ):\n ddex = np.array(range(0,OPS,NZ)) + rr\n # Advanced slicing used to get submatrix\n DDX_OP[np.ix_(ddex,ddex)] = DDX_1D\n \n # Make the operators sparse\n DDXM = sps.csr_matrix(DDX_OP); del(DDX_OP)\n\n return DDXM, DDZM\n\ndef computePartialDerivativesXZ_BC(DIMS, REFS, DDX_1D, DDZ_1D, DDX_BC, DDZ_BC):\n # Get the dimensions\n NX = DIMS[3] + 1\n NZ = DIMS[4]\n OPS = NX * NZ\n \n # Get REFS data\n sigma = REFS[7]\n \n # Unwrap the 1D derivative matrices into 2D operators\n \n #%% Vertical derivative and diffusion operators\n DDZ_OP = np.empty((OPS,OPS))\n for cc in range(NX):\n # Advanced slicing used to get submatrix\n ddex = np.array(range(NZ)) + cc * NZ\n # TF adjustment for vertical coordinate transformation\n SIGMA = sps.diags(sigma[:,cc])\n if cc == 0 or cc == NX-1:\n DDZ_OP[np.ix_(ddex,ddex)] = SIGMA.dot(DDZ_BC)\n else:\n DDZ_OP[np.ix_(ddex,ddex)] = SIGMA.dot(DDZ_1D)\n \n # Make the operators sparse\n DDZM = sps.csr_matrix(DDZ_OP); del(DDZ_OP)\n \n #%% Horizontal Derivative\n DDX_OP = np.empty((OPS,OPS))\n for rr in range(NZ):\n ddex = np.array(range(0,OPS,NZ)) + rr\n # Advanced slicing used to get submatrix\n if rr == 0 or cc == NZ-1:\n DDX_OP[np.ix_(ddex,ddex)] = DDX_BC\n else:\n DDX_OP[np.ix_(ddex,ddex)] = DDX_1D\n \n # Make the operators sparse\n DDXM = sps.csr_matrix(DDX_OP); del(DDX_OP)\n\n return DDXM, DDZM" ]
[ [ "numpy.concatenate" ], [ "scipy.sparse.csr_matrix", "numpy.ix_", "scipy.sparse.diags", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
mayuri-dhote/psydac
[ "01ddbe2d049a599684c45060912d01c2658160a3", "01ddbe2d049a599684c45060912d01c2658160a3", "01ddbe2d049a599684c45060912d01c2658160a3" ]
[ "psydac/feec/multipatch/fem_linear_operators.py", "psydac/api/tests/test_quadorder.py", "psydac/feec/multipatch/examples/hcurl_source_pbms_conga_2d.py" ]
[ "# coding: utf-8\n\nfrom mpi4py import MPI\n\nfrom scipy.sparse import eye as sparse_id\n\nfrom psydac.linalg.basic import LinearOperator\nfrom psydac.fem.basic import FemField\n\n#===============================================================================\nclass FemLinearOperator( LinearOperator ):\n \"\"\"\n Linear operators with an additional Fem layer\n \"\"\"\n\n def __init__( self, fem_domain=None, fem_codomain=None, matrix=None, sparse_matrix=None):\n \"\"\"\n we may store the matrix of the linear operator with different formats\n :param matrix: stencil format\n :param sparse_matrix: scipy sparse format\n \"\"\"\n assert fem_domain\n self._fem_domain = fem_domain\n if fem_codomain:\n self._fem_codomain = fem_codomain\n else:\n self._fem_codomain = fem_domain\n self._domain = self._fem_domain.vector_space\n self._codomain = self._fem_codomain.vector_space\n\n self._matrix = matrix\n self._sparse_matrix = sparse_matrix\n\n @property\n def domain( self ):\n return self._domain\n\n @property\n def codomain( self ):\n return self._codomain\n\n @property\n def fem_domain( self ):\n return self._fem_domain\n\n @property\n def fem_codomain( self ):\n return self._fem_codomain\n\n @property\n def matrix( self ):\n return self._matrix\n\n @property\n def T(self):\n return self.transpose()\n\n @property\n def dtype( self ):\n return self.domain.dtype\n\n # ...\n def transpose(self):\n raise NotImplementedError('Class does not provide a transpose() method')\n\n # ...\n def to_sparse_matrix( self , **kwargs):\n if self._sparse_matrix is not None:\n return self._sparse_matrix\n elif self._matrix is not None:\n return self._matrix.tosparse()\n else:\n raise NotImplementedError('Class does not provide a get_sparse_matrix() method without a matrix')\n\n # ...\n def __call__( self, f ):\n if self._matrix is not None:\n coeffs = self._matrix.dot(f.coeffs)\n return FemField(self.fem_codomain, coeffs=coeffs)\n else:\n raise NotImplementedError('Class does not provide a __call__ method without a matrix')\n\n # ...\n def dot( self, f_coeffs, out=None ):\n # coeffs layer\n if self._matrix is not None:\n f = FemField(self.fem_domain, coeffs=f_coeffs)\n return self(f).coeffs\n else:\n raise NotImplementedError('Class does not provide a dot method without a matrix')\n\n # ...\n def __mul__(self, c):\n return MultLinearOperator(c, self)\n\n # ...\n def __rmul__(self, c):\n return MultLinearOperator(c, self)\n\n # ...\n def __add__(self, C):\n assert isinstance(C, FemLinearOperator)\n return SumLinearOperator(C, self)\n\n # ...\n def __sub__(self, C):\n assert isinstance(C, FemLinearOperator)\n return SumLinearOperator(C, -self)\n\n # ...\n def __neg__(self):\n return MultLinearOperator(-1, self)\n\n\n#==============================================================================\nclass ComposedLinearOperator( FemLinearOperator ):\n \"\"\"\n operator L = L_1 .. 
L_n\n with L_i = self._operators[i-1]\n (so, the last one is applied first, like in a product)\n \"\"\"\n def __init__( self, operators ):\n n = len(operators)\n assert all([isinstance(operators[i], FemLinearOperator) for i in range(n)])\n assert all([operators[i].fem_domain == operators[i+1].fem_codomain for i in range(n-1)])\n FemLinearOperator.__init__(\n self, fem_domain=operators[-1].fem_domain, fem_codomain=operators[0].fem_codomain\n )\n self._operators = operators\n self._n = n\n\n # matrix not defined by matrix product because it could break the Stencil Matrix structure\n\n def to_sparse_matrix( self, **kwargs):\n mat = self._operators[-1].to_sparse_matrix()\n for i in range(2, self._n+1):\n mat = self._operators[-i].to_sparse_matrix() * mat\n return mat\n\n def __call__( self, f ):\n v = self._operators[-1](f)\n for i in range(2, self._n+1):\n v = self._operators[-i](v)\n return v\n\n def dot( self, f_coeffs, out=None ):\n v_coeffs = self._operators[-1].dot(f_coeffs)\n for i in range(2, self._n+1):\n v_coeffs = self._operators[-i].dot(v_coeffs)\n return v_coeffs\n\n\n#==============================================================================\nclass IdLinearOperator( FemLinearOperator ):\n\n def __init__( self, V ):\n FemLinearOperator.__init__(self, fem_domain=V)\n\n def to_sparse_matrix( self , **kwargs):\n return sparse_id( self.fem_domain.nbasis )\n\n def __call__( self, f ):\n return f\n\n def dot( self, f_coeffs, out=None ):\n return f_coeffs\n\n#==============================================================================\nclass SumLinearOperator( FemLinearOperator ):\n\n def __init__( self, B, A ):\n assert isinstance(A, FemLinearOperator)\n assert isinstance(B, FemLinearOperator)\n assert B.fem_domain == A.fem_domain\n assert B.fem_codomain == A.fem_codomain\n FemLinearOperator.__init__(\n self, fem_domain=A.fem_domain, fem_codomain=A.fem_codomain\n )\n self._A = A\n self._B = B\n\n def to_sparse_matrix( self, **kwargs):\n return self._A.to_sparse_matrix() + self._B.to_sparse_matrix()\n\n def __call__( self, f ):\n # fem layer\n return self._B(f) + self._A(f)\n\n def dot( self, f_coeffs, out=None ):\n # coeffs layer\n return self._B.dot(f_coeffs) + self._A.dot(f_coeffs)\n\n#==============================================================================\nclass MultLinearOperator( FemLinearOperator ):\n\n def __init__( self, c, A ):\n assert isinstance(A, FemLinearOperator)\n FemLinearOperator.__init__(\n self, fem_domain=A.fem_domain, fem_codomain=A.fem_codomain\n )\n self._A = A\n self._c = c\n\n def to_sparse_matrix( self, **kwargs):\n return self._c * self._A.to_sparse_matrix()\n\n def __call__( self, f ):\n # fem layer\n return self._c * self._A(f)\n\n def dot( self, f_coeffs, out=None ):\n # coeffs layer\n return self._c * self._A.dot(f_coeffs)\n\n", "import pytest\n\nfrom sympde.topology import Line, Square\nfrom sympde.topology import ScalarFunctionSpace\nfrom sympde.topology import element_of\nfrom sympde.core import Constant\nfrom sympde.expr import BilinearForm\nfrom sympde.expr import LinearForm\nfrom sympde.expr import integral\n\nimport numpy as np\n\nfrom psydac.api.discretization import discretize\nfrom psydac.api.settings import PSYDAC_BACKEND_PYTHON\n\n#==============================================================================\[email protected]( 'test_quad_order', [(3,3), (4,4), (5,3)] )\ndef test_custom_quad_order(test_quad_order):\n\n # If 'backend' is specified, accelerate Python code by passing **kwargs\n # to discretization of bilinear forms, 
linear forms and functionals.\n\n domain = Square()\n V = ScalarFunctionSpace('V', domain)\n u = element_of(V, name='u')\n v = element_of(V, name='v')\n c = Constant(name='c')\n\n a = BilinearForm((u, v), integral(domain, u * v))\n l = LinearForm(v, integral(domain, v))\n\n ncells = (12, 12)\n degree = (2, 2)\n\n domain_h = discretize(domain, ncells=ncells)\n\n # TODO for future (once fixed/solved): remove the nquads=(10,10) here again\n Vh = discretize(V, domain_h, degree=degree, nquads=test_quad_order)\n\n # NOTE: we _need_ the Python backend here for range checking, otherwise we'd only get segfaults at best\n _ = discretize(a, domain_h, [Vh, Vh], nquads=test_quad_order, backend=PSYDAC_BACKEND_PYTHON).assemble()\n _ = discretize(l, domain_h, Vh , nquads=test_quad_order, backend=PSYDAC_BACKEND_PYTHON).assemble()\n\n assert np.array_equal(Vh.quad_order, test_quad_order)\n", "# coding: utf-8\n\nfrom mpi4py import MPI\n\nimport os\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom sympy import lambdify, Matrix\n\nfrom scipy.sparse.linalg import spsolve\n\nfrom sympde.calculus import dot\nfrom sympde.topology import element_of\nfrom sympde.expr.expr import LinearForm\nfrom sympde.expr.expr import integral, Norm\nfrom sympde.topology import Derham\n\nfrom psydac.api.settings import PSYDAC_BACKENDS\nfrom psydac.feec.pull_push import pull_2d_hcurl\n\nfrom psydac.feec.multipatch.api import discretize\nfrom psydac.feec.multipatch.fem_linear_operators import IdLinearOperator\nfrom psydac.feec.multipatch.operators import HodgeOperator\nfrom psydac.feec.multipatch.plotting_utilities import plot_field\nfrom psydac.feec.multipatch.multipatch_domain_utilities import build_multipatch_domain\nfrom psydac.feec.multipatch.examples.ppc_test_cases import get_source_and_solution\nfrom psydac.feec.multipatch.utilities import time_count\nfrom psydac.linalg.utilities import array_to_stencil\nfrom psydac.fem.basic import FemField\n\ndef solve_hcurl_source_pbm(\n nc=4, deg=4, domain_name='pretzel_f', backend_language=None, source_proj='P_geom', source_type='manu_J',\n eta=-10., mu=1., nu=1., gamma_h=10.,\n plot_source=False, plot_dir=None, hide_plots=True,\n m_load_dir=None,\n):\n \"\"\"\n solver for the problem: find u in H(curl), such that\n\n A u = f on \\Omega\n n x u = n x u_bc on \\partial \\Omega\n\n where the operator\n\n A u := eta * u + mu * curl curl u - nu * grad div u\n\n is discretized as Ah: V1h -> V1h in a broken-FEEC approach involving a discrete sequence on a 2D multipatch domain \\Omega,\n\n V0h --grad-> V1h -—curl-> V2h\n\n Examples:\n\n - time-harmonic maxwell equation with\n eta = -omega**2\n mu = 1\n nu = 0\n\n - Hodge-Laplacian operator L = A with\n eta = 0\n mu = 1\n nu = 1\n\n :param nc: nb of cells per dimension, in each patch\n :param deg: coordinate degree in each patch\n :param gamma_h: jump penalization parameter\n :param source_proj: approximation operator for the source, possible values are 'P_geom' or 'P_L2'\n :param source_type: must be implemented in get_source_and_solution()\n :param m_load_dir: directory for matrix storage\n \"\"\"\n\n ncells = [nc, nc]\n degree = [deg,deg]\n\n # if backend_language is None:\n # if domain_name in ['pretzel', 'pretzel_f'] and nc > 8:\n # backend_language='numba'\n # else:\n # backend_language='python'\n # print('[note: using '+backend_language+ ' backends in discretize functions]')\n if m_load_dir is not None:\n if not os.path.exists(m_load_dir):\n os.makedirs(m_load_dir)\n\n 
print('---------------------------------------------------------------------------------------------------------')\n print('Starting solve_hcurl_source_pbm function with: ')\n print(' ncells = {}'.format(ncells))\n print(' degree = {}'.format(degree))\n print(' domain_name = {}'.format(domain_name))\n print(' source_proj = {}'.format(source_proj))\n print(' backend_language = {}'.format(backend_language))\n print('---------------------------------------------------------------------------------------------------------')\n\n t_stamp = time_count()\n print('building symbolic domain sequence...')\n domain = build_multipatch_domain(domain_name=domain_name)\n mappings = OrderedDict([(P.logical_domain, P.mapping) for P in domain.interior])\n mappings_list = list(mappings.values())\n\n t_stamp = time_count(t_stamp)\n print('building derham sequence...')\n derham = Derham(domain, [\"H1\", \"Hcurl\", \"L2\"])\n\n t_stamp = time_count(t_stamp)\n print('building discrete domain...')\n domain_h = discretize(domain, ncells=ncells)\n\n t_stamp = time_count(t_stamp)\n print('building discrete derham sequence...')\n derham_h = discretize(derham, domain_h, degree=degree, backend=PSYDAC_BACKENDS[backend_language])\n\n t_stamp = time_count(t_stamp)\n print('building commuting projection operators...')\n nquads = [4*(d + 1) for d in degree]\n P0, P1, P2 = derham_h.projectors(nquads=nquads)\n\n # multi-patch (broken) spaces\n t_stamp = time_count(t_stamp)\n print('calling the multi-patch spaces...')\n V0h = derham_h.V0\n V1h = derham_h.V1\n V2h = derham_h.V2\n print('dim(V0h) = {}'.format(V0h.nbasis))\n print('dim(V1h) = {}'.format(V1h.nbasis))\n print('dim(V2h) = {}'.format(V2h.nbasis))\n\n t_stamp = time_count(t_stamp)\n print('building the Id operator and matrix...')\n I1 = IdLinearOperator(V1h)\n I1_m = I1.to_sparse_matrix()\n\n t_stamp = time_count(t_stamp)\n print('instanciating the Hodge operators...')\n # multi-patch (broken) linear operators / matrices\n # other option: define as Hodge Operators:\n H0 = HodgeOperator(V0h, domain_h, backend_language=backend_language, load_dir=m_load_dir, load_space_index=0)\n H1 = HodgeOperator(V1h, domain_h, backend_language=backend_language, load_dir=m_load_dir, load_space_index=1)\n H2 = HodgeOperator(V2h, domain_h, backend_language=backend_language, load_dir=m_load_dir, load_space_index=2)\n\n t_stamp = time_count(t_stamp)\n print('building the dual Hodge matrix dH0_m = M0_m ...')\n dH0_m = H0.get_dual_Hodge_sparse_matrix() # = mass matrix of V0\n\n t_stamp = time_count(t_stamp)\n print('building the primal Hodge matrix H0_m = inv_M0_m ...')\n H0_m = H0.to_sparse_matrix() # = inverse mass matrix of V0\n\n t_stamp = time_count(t_stamp)\n print('building the dual Hodge matrix dH1_m = M1_m ...')\n dH1_m = H1.get_dual_Hodge_sparse_matrix() # = mass matrix of V1\n\n t_stamp = time_count(t_stamp)\n print('building the primal Hodge matrix H1_m = inv_M1_m ...')\n H1_m = H1.to_sparse_matrix() # = inverse mass matrix of V1\n\n # print(\"dH1_m @ H1_m == I1_m: {}\".format(np.allclose((dH1_m @ H1_m).todense(), I1_m.todense())) ) # CHECK: OK\n\n t_stamp = time_count(t_stamp)\n print('building the dual Hodge matrix dH2_m = M2_m ...')\n dH2_m = H2.get_dual_Hodge_sparse_matrix() # = mass matrix of V2\n\n t_stamp = time_count(t_stamp)\n print('building the conforming Projection operators and matrices...')\n # conforming Projections (should take into account the boundary conditions of the continuous deRham sequence)\n cP0 = derham_h.conforming_projection(space='V0', hom_bc=True, 
backend_language=backend_language, load_dir=m_load_dir)\n cP1 = derham_h.conforming_projection(space='V1', hom_bc=True, backend_language=backend_language, load_dir=m_load_dir)\n cP0_m = cP0.to_sparse_matrix()\n cP1_m = cP1.to_sparse_matrix()\n\n t_stamp = time_count(t_stamp)\n print('building the broken differential operators and matrices...')\n # broken (patch-wise) differential operators\n bD0, bD1 = derham_h.broken_derivatives_as_operators\n bD0_m = bD0.to_sparse_matrix()\n bD1_m = bD1.to_sparse_matrix()\n\n if plot_dir is not None and not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n def lift_u_bc(u_bc):\n if u_bc is not None:\n print('lifting the boundary condition in V1h...')\n # note: for simplicity we apply the full P1 on u_bc, but we only need to set the boundary dofs\n u_bc_x = lambdify(domain.coordinates, u_bc[0])\n u_bc_y = lambdify(domain.coordinates, u_bc[1])\n u_bc_log = [pull_2d_hcurl([u_bc_x, u_bc_y], m) for m in mappings_list]\n # it's a bit weird to apply P1 on the list of (pulled back) logical fields -- why not just apply it on u_bc ?\n uh_bc = P1(u_bc_log)\n ubc_c = uh_bc.coeffs.toarray()\n # removing internal dofs (otherwise ubc_c may already be a very good approximation of uh_c ...)\n ubc_c = ubc_c - cP1_m.dot(ubc_c)\n else:\n ubc_c = None\n return ubc_c\n\n # Conga (projection-based) stiffness matrices\n # curl curl:\n t_stamp = time_count(t_stamp)\n print('computing the curl-curl stiffness matrix...')\n print(bD1_m.shape, dH2_m.shape )\n pre_CC_m = bD1_m.transpose() @ dH2_m @ bD1_m\n # CC_m = cP1_m.transpose() @ pre_CC_m @ cP1_m # Conga stiffness matrix\n\n # grad div:\n t_stamp = time_count(t_stamp)\n print('computing the grad-div stiffness matrix...')\n pre_GD_m = - dH1_m @ bD0_m @ cP0_m @ H0_m @ cP0_m.transpose() @ bD0_m.transpose() @ dH1_m\n # GD_m = cP1_m.transpose() @ pre_GD_m @ cP1_m # Conga stiffness matrix\n\n # jump penalization:\n t_stamp = time_count(t_stamp)\n print('computing the jump penalization matrix...')\n jump_penal_m = I1_m - cP1_m\n JP_m = jump_penal_m.transpose() * dH1_m * jump_penal_m\n\n t_stamp = time_count(t_stamp)\n print('computing the full operator matrix...')\n print('eta = {}'.format(eta))\n print('mu = {}'.format(mu))\n print('nu = {}'.format(nu))\n pre_A_m = cP1_m.transpose() @ ( eta * dH1_m + mu * pre_CC_m - nu * pre_GD_m ) # useful for the boundary condition (if present)\n A_m = pre_A_m @ cP1_m + gamma_h * JP_m\n\n # get exact source, bc's, ref solution...\n # (not all the returned functions are useful here)\n t_stamp = time_count(t_stamp)\n print('getting the source and ref solution...')\n N_diag = 200\n method = 'conga'\n f_scal, f_vect, u_bc, ph_ref, uh_ref, p_ex, u_ex, phi, grad_phi = get_source_and_solution(\n source_type=source_type, eta=eta, mu=mu, domain=domain, domain_name=domain_name,\n refsol_params=[N_diag, method, source_proj],\n )\n\n # compute approximate source f_h\n t_stamp = time_count(t_stamp)\n b_c = f_c = None\n if source_proj == 'P_geom':\n # f_h = P1-geometric (commuting) projection of f_vect\n print('projecting the source with commuting projection...')\n f_x = lambdify(domain.coordinates, f_vect[0])\n f_y = lambdify(domain.coordinates, f_vect[1])\n f_log = [pull_2d_hcurl([f_x, f_y], m) for m in mappings_list]\n f_h = P1(f_log)\n f_c = f_h.coeffs.toarray()\n b_c = dH1_m.dot(f_c)\n\n elif source_proj == 'P_L2':\n # f_h = L2 projection of f_vect\n print('projecting the source with L2 projection...')\n v = element_of(V1h.symbolic_space, name='v')\n expr = dot(f_vect,v)\n l = LinearForm(v, 
integral(domain, expr))\n lh = discretize(l, domain_h, V1h, backend=PSYDAC_BACKENDS[backend_language])\n b = lh.assemble()\n b_c = b.toarray()\n if plot_source:\n f_c = H1_m.dot(b_c)\n else:\n raise ValueError(source_proj)\n\n if plot_source:\n plot_field(numpy_coeffs=f_c, Vh=V1h, space_kind='hcurl', domain=domain, title='f_h with P = '+source_proj, filename=plot_dir+'/fh_'+source_proj+'.png', hide_plot=hide_plots)\n\n ubc_c = lift_u_bc(u_bc)\n\n if ubc_c is not None:\n # modified source for the homogeneous pbm\n t_stamp = time_count(t_stamp)\n print('modifying the source with lifted bc solution...')\n b_c = b_c - pre_A_m.dot(ubc_c)\n\n # direct solve with scipy spsolve\n t_stamp = time_count(t_stamp)\n print('solving source problem with scipy.spsolve...')\n uh_c = spsolve(A_m, b_c)\n\n # project the homogeneous solution on the conforming problem space\n t_stamp = time_count(t_stamp)\n print('projecting the homogeneous solution on the conforming problem space...')\n uh_c = cP1_m.dot(uh_c)\n\n if ubc_c is not None:\n # adding the lifted boundary condition\n t_stamp = time_count(t_stamp)\n print('adding the lifted boundary condition...')\n uh_c += ubc_c\n\n t_stamp = time_count(t_stamp)\n print('getting and plotting the FEM solution from numpy coefs array...')\n title = r'solution $u_h$ (amplitude) for $\\eta = $'+repr(eta)\n params_str = 'eta={}_mu={}_nu={}_gamma_h={}'.format(eta, mu, nu, gamma_h)\n\n if plot_dir:\n plot_field(numpy_coeffs=uh_c, Vh=V1h, space_kind='hcurl', domain=domain, title=title, filename=plot_dir+params_str+'_uh.png', hide_plot=hide_plots)\n\n time_count(t_stamp)\n\n if u_ex:\n u = element_of(V1h.symbolic_space, name='u')\n l2norm = Norm(Matrix([u[0] - u_ex[0],u[1] - u_ex[1]]), domain, kind='l2')\n l2norm_h = discretize(l2norm, domain_h, V1h)\n uh_c = array_to_stencil(uh_c, V1h.vector_space)\n l2_error = l2norm_h.assemble(u=FemField(V1h, coeffs=uh_c))\n return l2_error\n\nif __name__ == '__main__':\n\n t_stamp_full = time_count()\n\n quick_run = True\n # quick_run = False\n\n omega = np.sqrt(170) # source\n roundoff = 1e4\n eta = int(-omega**2 * roundoff)/roundoff\n\n source_type = 'manu_maxwell'\n # source_type = 'manu_J'\n\n if quick_run:\n domain_name = 'curved_L_shape'\n nc = 4\n deg = 2\n else:\n nc = 8\n deg = 4\n\n domain_name = 'pretzel_f'\n # domain_name = 'curved_L_shape'\n nc = 20\n deg = 2\n\n # nc = 2\n # deg = 2\n\n run_dir = '{}_{}_nc={}_deg={}/'.format(domain_name, source_type, nc, deg)\n m_load_dir = 'matrices_{}_nc={}_deg={}/'.format(domain_name, nc, deg)\n solve_hcurl_source_pbm(\n nc=nc, deg=deg,\n eta=eta,\n nu=0,\n mu=1, #1,\n domain_name=domain_name,\n source_type=source_type,\n backend_language='pyccel-gcc',\n plot_source=True,\n plot_dir='./plots/tests_source_feb_13/'+run_dir,\n hide_plots=True,\n m_load_dir=m_load_dir\n )\n\n time_count(t_stamp_full, msg='full program')\n" ]
[ [ "scipy.sparse.eye" ], [ "numpy.array_equal" ], [ "numpy.sqrt", "scipy.sparse.linalg.spsolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Valdert-13/captcha
[ "dd521e0184462ba80fc6f201a10ee9653c5724c8", "dd521e0184462ba80fc6f201a10ee9653c5724c8" ]
[ "model/models_nn.py", "model/test_model.py" ]
[ "import tensorflow as tf\nfrom config import *\n\ndef model_1():\n 'Иницилизация структуры модели'\n input_img = tf.keras.layers.Input(shape=IMG_SHAPE)\n output_code = []\n\n out = tf.keras.layers.Convolution2D(16, (3, 3), padding='same', activation='relu')(input_img)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.BatchNormalization()(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Flatten()(out)\n\n for _ in range(NUM_CODE_CHARACTERS):\n dense = tf.keras.layers.Dense(64, activation='relu')(out)\n dropout = tf.keras.layers.Dropout(0.4)(dense)\n prediction = tf.keras.layers.Dense(ALL_CHARS_LEN, activation='sigmoid')(dropout)\n\n output_code.append(prediction)\n\n model = tf.keras.Model(input_img, output_code)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef model_2():\n 'Иницилизация структуры модели'\n input_img = tf.keras.layers.Input(shape=IMG_SHAPE)\n output_code = []\n\n out = tf.keras.layers.Convolution2D(16, (3, 3), padding='same', activation='relu')(input_img)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(32, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.BatchNormalization()(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Flatten()(out)\n\n for _ in range(NUM_CODE_CHARACTERS):\n dense = tf.keras.layers.Dense(64, activation='relu')(out)\n dropout = tf.keras.layers.Dropout(0.4)(dense)\n prediction = tf.keras.layers.Dense(ALL_CHARS_LEN, activation='sigmoid')(dropout)\n\n output_code.append(prediction)\n\n model = tf.keras.Model(input_img, output_code)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef model_3():\n 'Иницилизация структуры модели'\n input_img = tf.keras.layers.Input(shape=IMG_SHAPE)\n output_code = []\n\n out = tf.keras.layers.Convolution2D(16, (3, 3), padding='same', activation='relu')(input_img)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.Convolution2D(64, (3, 3), padding='same', activation='relu')(out)\n out = tf.keras.layers.BatchNormalization()(out)\n out = tf.keras.layers.MaxPooling2D(padding='same')(out)\n out = tf.keras.layers.Flatten()(out)\n\n for _ in range(NUM_CODE_CHARACTERS):\n dense = tf.keras.layers.Dense(64, activation='relu')(out)\n dropout = 
tf.keras.layers.Dropout(0.4)(dense)\n prediction = tf.keras.layers.Dense(ALL_CHARS_LEN, activation='sigmoid')(dropout)\n\n output_code.append(prediction)\n\n model = tf.keras.Model(input_img, output_code)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "from config import *\nimport numpy as np\n\n\nclass Test_model():\n \"\"\"\n Class builds container with predictive models based\n\n Parameters\n ----------\n\n train: tf.data.Datasets\n Тренировочный, предобработатнный датасет\n\n \"\"\"\n\n def __init__(self,\n models:list=[],\n image=None,\n label=None,):\n self.models = models\n self.image = image\n self.label = label\n\n\n\n def _tensor_to_chars(self, tensor):\n 'Преоброзование тензора в символьную строку'\n label_pred = []\n for i in tensor:\n indexes = []\n for char_vector in i:\n indexes.append(np.argmax(char_vector))\n label_pred.append(indexes)\n\n for j, indexes in enumerate(label_pred):\n code = ''\n for i in indexes:\n code += ALL_CHARS[i]\n\n label_pred[j] = code\n\n return label_pred\n\n\n def test_data(self):\n 'Тестирование моделей на тестовой выборке'\n for model in self.models:\n correct = 0\n total = 0\n predicted_code = np.array(model['model_class'].predict(self.image))\n predicted_code = np.transpose(predicted_code, (1, 0, 2))\n label_pred = self._tensor_to_chars(predicted_code)\n labels = self._tensor_to_chars (self.label)\n total += len(labels)\n correct += len([i for i, j in zip(labels, label_pred) if i == j])\n model['result'] = correct / total\n\n\n return self.models\n\n\n\n\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Convolution2D", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ], [ "numpy.argmax", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
linerxliner/ValCAT
[ "e62985c6c64f6415bb2bb4716bd02d9686badd47", "e62985c6c64f6415bb2bb4716bd02d9686badd47" ]
[ "taattack/victims/wrappers/pytorch_warpper.py", "taattack/utils.py" ]
[ "import torch\n\nfrom .wrapper import Wrapper\n\n\nclass PyTorchWrapper(Wrapper):\n def __init__(self, model, tokenizer):\n super(PyTorchWrapper, self).__init__()\n\n self._model = model\n self._tokenizer = tokenizer\n\n self.unk_token = self._tokenizer.unk_token\n\n def _forward(self, text_list):\n if len(text_list) == 1:\n text_list = text_list[0]\n ids = self._tokenizer(text_list)\n device = next(self._model.parameters()).device\n ids = torch.tensor(ids).to(device)\n\n with torch.no_grad():\n outputs = self._model(ids)\n\n return outputs.cpu().numpy()\n", "import flair\nimport numpy as np\nimport spacy\nimport tensorflow_hub as hub\nimport torch\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom string import punctuation\nfrom transformers import AutoTokenizer, GPT2LMHeadModel, MT5ForConditionalGeneration, T5ForConditionalGeneration\n\nfrom .config import DEVICES\n\n\nclass ModelPool:\n ENCODER_DECODER2MODEL_TOKENIZER = {\n 't5-base': 't5_base',\n 't5-large': 't5_large',\n 't5-v1_1-base': 't5_v1_1_base',\n 'mt5-base': 'mt5_base',\n }\n\n def encoder_decoder2model_token(self, encoder_decoder):\n return getattr(self, self.ENCODER_DECODER2MODEL_TOKENIZER[encoder_decoder])\n\n @property\n def flair_pos_tagger(self):\n if not hasattr(self, '_flair_pos_tagger'):\n flair.device = torch.device(DEVICES[1])\n self._flair_pos_tagger = SequenceTagger.load('upos-fast')\n\n return self._flair_pos_tagger\n\n @property\n def gpt2(self):\n if not hasattr(self, '_gpt2_model'):\n self._gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2')\n if not hasattr(self, '_gpt2_tokenizer'):\n self._gpt2_tokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast=True)\n\n return self._gpt2_model, self._gpt2_tokenizer\n\n @property\n def mt5_base(self):\n if not hasattr(self, '_mt5_base_model'):\n self._mt5_base_model = MT5ForConditionalGeneration.from_pretrained('google/mt5-base')\n if not hasattr(self, '_mt5_base_tokenizer'):\n self._mt5_base_tokenizer = AutoTokenizer.from_pretrained('google/mt5-base', use_fast=True)\n\n return self._mt5_base_model, self._mt5_base_tokenizer\n\n @property\n def spacy_model(self):\n if not hasattr(self, '_spacy_model'):\n self._spacy_model = spacy.load('en_core_web_sm')\n return self._spacy_model\n\n @property\n def t5_base(self):\n if not hasattr(self, '_t5_base_model'):\n self._t5_base_model = T5ForConditionalGeneration.from_pretrained('t5-base')\n if not hasattr(self, '_t5_base_tokenizer'):\n self._t5_base_tokenizer = AutoTokenizer.from_pretrained('t5-base', use_fast=True)\n\n return self._t5_base_model, self._t5_base_tokenizer\n\n @property\n def t5_large(self):\n if not hasattr(self, '_t5_large_model'):\n self._t5_large_model = T5ForConditionalGeneration.from_pretrained('t5-large')\n if not hasattr(self, '_t5_large_tokenizer'):\n self._t5_large_tokenizer = AutoTokenizer.from_pretrained('t5-large', use_fast=True)\n\n return self._t5_large_model, self._t5_large_tokenizer\n\n @property\n def t5_v1_1_base(self):\n if not hasattr(self, '_t5_v1_1_base_model'):\n self._t5_v1_1_base_model = T5ForConditionalGeneration.from_pretrained('google/t5-v1_1-base')\n if not hasattr(self, '_t5_v1_1_base_tokenizer'):\n self._t5_v1_1_base_tokenizer = AutoTokenizer.from_pretrained('google/t5-v1_1-base', use_fast=True)\n\n return self._t5_v1_1_base_model, self._t5_v1_1_base_tokenizer\n\n @property\n def treebank_word_detokenizer(self):\n if not hasattr(self, 
'_treebank_word_detokenizer'):\n self._treebank_word_detokenizer = TreebankWordDetokenizer()\n return self._treebank_word_detokenizer\n\n @property\n def use(self):\n if not hasattr(self, '_use'):\n self._use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')\n return self._use\n\n\nmodel_pool = ModelPool()\n\n\ndef tokenize(text):\n doc = model_pool.spacy_model(text)\n tokens = [token.text for token in doc]\n\n return tokens\n\n\ndef detokenize(tokens):\n return model_pool.treebank_word_detokenizer.detokenize(tokens)\n\n\ndef is_continuous(sequence):\n if len(sequence) == 0:\n return False\n\n for i in range(len(sequence) - 1):\n if sequence[i] + 1 != sequence[i + 1]:\n return False\n\n return True\n\n\ndef is_punctuation(c):\n return len(c) == 1 and c in punctuation\n\n\ndef is_one_word(text):\n return len(tokenize(text)) == 1\n\n\ndef get_use_sim(text1, text2):\n orig_embd, adv_embd = model_pool.use([text1, text2]).numpy()\n sim = cosine_similarity(orig_embd[np.newaxis, ...], adv_embd[np.newaxis, ...])[0, 0]\n return sim.item()\n\n\ndef get_lcs_len(words1, words2):\n num_words1, num_words2 = len(words1), len(words2)\n\n dp = np.zeros((num_words1 + 1, num_words2 + 1), dtype=int)\n\n for i in range(1, num_words1 + 1):\n for j in range(1, num_words2 + 1):\n if words1[i - 1] == words2[j - 1]:\n dp[i, j] = dp[i - 1, j - 1] + 1\n else:\n dp[i, j] = max(dp[i - 1, j], dp[i, j - 1])\n\n return dp[num_words1, num_words2].item()\n\n\ndef get_num_word_pert(words1, words2):\n words1, words2 = list(map(lambda w: w.lower(), words1)), list(map(lambda w: w.lower(), words2))\n return max(len(words1), len(words2)) - get_lcs_len(words1, words2)\n\n\ndef get_pos_list(words):\n sentence = Sentence(detokenize(words), use_tokenizer=lambda text: words)\n model_pool.flair_pos_tagger.predict(sentence)\n return [token.annotation_layers['pos'][0]._value for token in sentence.tokens]\n" ]
[ [ "torch.no_grad", "torch.tensor" ], [ "torch.device", "numpy.zeros", "sklearn.metrics.pairwise.cosine_similarity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doncat99/FinanceAnalysis
[ "689aed45bab9e691566b308d2778170a0b3950ec" ]
[ "candle.py" ]
[ "import os\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom zvt import zvt_env\nfrom zvt.api.data_type import Region, Provider\nfrom zvt.factors.candlestick_factor import CandleStickFactor, candlestick_patterns\n\n# import faulthandler\n# faulthandler.enable()\n\n\ndef get_cache():\n file = zvt_env['cache_path'] + '/' + 'candle.pkl'\n if os.path.exists(file) and os.path.getsize(file) > 0:\n with open(file, 'rb') as handle:\n return pickle.load(handle)\n return None\n\n\ndef dump(data):\n file = zvt_env['cache_path'] + '/' + 'candle.pkl'\n with open(file, 'wb+') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == '__main__':\n pd.set_option('max_colwidth', 200)\n\n gb = get_cache()\n\n if gb is None:\n factor = CandleStickFactor(region=Region.US,\n start_timestamp='2015-01-01',\n kdata_overlap=0,\n provider=Provider.Yahoo)\n gb = factor.result_df.groupby('code')\n dump(gb)\n\n stocks = []\n\n for symbol in gb.groups:\n df = gb.get_group(symbol)\n\n patterns = []\n for pattern in candlestick_patterns: \n last = df[pattern].tail(1).values[0]\n patterns.append(last)\n stocks.append(patterns)\n\n def normalization(data):\n _range = np.max(abs(data))\n return data / _range\n\n stocks = normalization(np.array(stocks))\n\n df = pd.DataFrame(data=stocks, columns=candlestick_patterns.keys(), index=gb.groups.keys())\n df['sum'] = df.sum(axis=1)\n df.sort_values(by=['sum'], ascending=False, inplace=True)\n\n f, ax = plt.subplots(figsize=(6, 4))\n cmap = sns.cubehelix_palette(start=0, rot=3, gamma=0.8, as_cmap=True)\n sns.heatmap(df, cmap=cmap, linewidths=0, linecolor='white', ax=ax)\n ax.set_title('Amounts per kind and region')\n ax.set_xlabel('pattern')\n ax.set_ylabel('stock')\n plt.show()\n" ]
[ [ "numpy.array", "pandas.set_option", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ollpu/tiralabra
[ "250a91aa3d3e150ee75555745e21e0a3aafb607c" ]
[ "bench/plot_parametrized_comparison.py" ]
[ "\"\"\"\nPlot comparison of different Criterion baselines, for a benchmark that\nhas a parameter. Requires matplotlib and numpy.\n\nUsage: (run in benchmark crate root)\n\npython3 plot_parametrized_comparison.py [benchmark path] [baseline names...]\n\nbenchmark path could be e.g. \"fft/copy and fft\"\n\"\"\"\nimport sys\nimport json\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nbenchmark_name = sys.argv[1]\nbaselines = sys.argv[2:]\n\nbaseline_data = { name: [] for name in baselines }\n\np = Path(\"target\") / \"criterion\" / benchmark_name\n\nvalues = []\n\nfor parameter in p.iterdir():\n value = parameter.parts[-1]\n if value != \"report\":\n value = int(value)\n values.append(value)\n for name in baselines:\n estimates_path = parameter / name / \"estimates.json\"\n with open(estimates_path) as f:\n data = json.load(f)\n a, b, c = (data[\"mean\"][\"point_estimate\"] / 1000,\n data[\"mean\"][\"confidence_interval\"][\"lower_bound\"] / 1000,\n data[\"mean\"][\"confidence_interval\"][\"upper_bound\"] / 1000)\n baseline_data[name].append((value, a, b, c))\n\nvalues.sort()\nlinear = len(values) <= 2 or values[2] - values[1] == values[1] - values[0]\nplt.title(benchmark_name)\nif not linear:\n plt.xscale(\"log\")\nplt.xticks(values)\nplt.xlabel(\"input\")\nif not linear:\n plt.yscale(\"log\")\nplt.ylabel(\"time (µs)\")\nplt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\ncomp = [[] for x in values]\nfor name, data in baseline_data.items():\n data.sort(key=lambda t: t[0])\n for t, arr in zip(data, comp):\n arr.append(t[1])\n points = np.array([t[1] for t in data])\n confidence = np.array([[t[2] for t in data], [t[3] for t in data]])\n confidence = np.abs(confidence - points)\n #plt.errorbar(values, points, yerr=confidence, linestyle=\"solid\", marker=\"o\")\n plt.plot(values, points, label=name, marker=\"o\")\nfor values in comp:\n if len(values) == 2:\n old, new = values\n change = (old-new)/old * 100\n print(f\"{old:.3f}; {new:.3f}; {change:.1f} %\".replace(\".\", \",\"))\n else:\n print(\";\".join(f\"{value:.3f}\" for value in values).replace(\".\", \",\"))\nplt.legend()\nplt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "numpy.abs", "matplotlib.pyplot.title", "matplotlib.pyplot.xscale", "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "matplotlib.ticker.ScalarFormatter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andre-dsouza/ML-for-Access-Control
[ "dc7ae4438d6f66306d744fe9d2331c394d108c92" ]
[ "ensemble.py" ]
[ "\"\"\" Amazon Access Challenge Starter Code\nThis code enhances the original categorical data with a few sets of additional\nfeatures. It then trains ensemble of decision tree models on the data.\n\nHyperparameter searches were done using leaderboard feedback and grid searches\nin earlier versions of this code.\n\nModel blending / stacking was done in coordination with my partner, Paul Duan,\nand his models.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier)\nfrom sklearn import (metrics, model_selection, linear_model, preprocessing)\n\nSEED = 42 # a seed for randomized procedures\n\n# === load data in memory === #\n\"\"\" Reading data. Each file has 10 columns. The first column in train is the \nlabel (ACTION), the first column in test is an ID used for making sumbissions.\nThe last column, ROLE_CODE, is actually just a duplicate of another column.\n\"\"\"\nprint(\"loading data\")\ntest = pd.read_csv('test.csv', index_col=0)\ntrain = pd.read_csv('train.csv') # There's no id column in train\ny = train['ACTION']\ntrain = train.drop(['ACTION'], axis=1)\n\n\"\"\"I believe I got these hyper-parameters from a combination of leaderboard\nfeedback and an earlier version of this program that performed a somewhat\ncrude grid-search CV. This code is from one of my earliest Kaggle contests and\none of my earliest machine_learning methodlogy-based projects. My\nmethodology has improved and streamlined a lot since then.\"\"\"\nmodelRF =RandomForestClassifier(n_estimators=999, max_features='sqrt', \n\t\tmax_depth=None, min_samples_split=9, random_state=SEED)\nmodelXT =ExtraTreesClassifier(n_estimators=999, max_features='sqrt', \n\t\tmax_depth=None, min_samples_split=8, random_state=SEED)\nmodelGB =GradientBoostingClassifier(n_estimators=50, learning_rate=0.20, \n\t\tmax_depth=20, min_samples_split=9, random_state=SEED)\n\n# Put train and test together for consistant preprocessing\nX_all = pd.concat([test, train], ignore_index=True)\ntest_rows = len(test)\n\n# This column is completely redundant with ROLE_TITLE\nX_all = X_all.drop(['ROLE_CODE'], axis=1)\n\n\"\"\"The feature \"Role Title\" is a subcategory of \"Role Family\" and Rollup 1 is a\nsubcategory of Rollup 2. I believe I experimented with leaving the redundant \ncategory as a feature and also with simply dropping it. \n\nBut in the end I found a slight score boost from the code below, which preserves\na small amount of numerical association between subcategories with a common\nsuper-category.\"\"\"\nX_all['ROLE_TITLE'] = X_all['ROLE_TITLE'] + (1000 * X_all['ROLE_FAMILY'])\nX_all['ROLE_ROLLUPS'] = X_all['ROLE_ROLLUP_1'] + (10000 * X_all['ROLE_ROLLUP_2'])\nX_all = X_all.drop(['ROLE_ROLLUP_1','ROLE_ROLLUP_2','ROLE_FAMILY'], axis=1)\n\n\n# Adding Count Features for Each Column\nprint(\"Counts\")\nfor col in X_all.columns:\n count = X_all[col].value_counts()\n X_all['count_'+col] = X_all[col].replace(count)\n\n\"\"\"Resource is a bit different from the other columns. Others describe a \nparticular department or role (like accountant, programmer, etc.). Resource is \nsomething that the employee wants access too. In the following code, I measure\nthe % of the requests that each department, role, manager, etc. give for this\nresource. 
So in other words is this a resource that makes up a significant\nfraction of the requests associated with this department?\"\"\"\n# Adding features describing % of requests that are for the requested resource\n# for each particular category of the other columns.\n# This takes quite a while to run (10 mins on my machine.)\nfor col in X_all.columns[1:6]:\n X_all['resource_usage_'+col] = 0.0\n counts = X_all.groupby([col, 'RESOURCE']).size()\n percents = counts.groupby(level=0).transform(lambda x: x/sum(x))\n cc = 0\n print(col, len(percents))\n for c, r in percents.index:\n X_all.loc[(X_all[col]==c) & (X_all['RESOURCE']==r), 'resource_usage_'+ col] = percents[(c, r)]\n cc += 1\n if cc % 1000 == 1:\n print(cc)\n\n\n# Number of Resources that a manager manages. I recall that many other similar\n# features were tested, but this is the only that seemed to reliably move the \n# needle.\nm_r_counts = X_all.groupby(['MGR_ID', \"RESOURCE\"]).size()\nm_counts = m_r_counts.groupby(level=0).size()\nX_all['Manager_Resrouces'] = X_all['MGR_ID'].replace(m_counts)\n\n\n# Here running Pickle or cPickle would probably be helpful, depending\n# on the workflow and goals at this stage of the competition\n\n\n# Recover Test/Train\nX_train = X_all.iloc[test_rows:,:]\nX_test = X_all.iloc[:test_rows:]\n\n# === Predictions === #\nmodelRF.fit(X_train, y)\nmodelXT.fit(X_train, y)\nmodelGB.fit(X_train, y)\n\npredsRF = modelRF.predict_proba(X_test)[:, 1]\npredsXT = modelXT.predict_proba(X_test)[:, 1]\npredsGB = modelGB.predict_proba(X_test)[:, 1]\n\npreds = np.vstack((predsRF, predsXT, predsGB)).T\nsubmissions = pd.DataFrame(data=preds, columns=[\"RF\", \"XT\", \"GB\"], index = test.index)\nprint(submissions.describe())\n\n# At this point these models were blended with my logistic model and another\n# dozen or so models created by my competition partner.\n# I think we did this together, in an interactive session using a standard\n# stacking / blending techniques.\nsubmissions.to_csv(\"dense_numerical_and_categorical_models.csv\")\n" ]
[ [ "pandas.concat", "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.ExtraTreesClassifier", "pandas.DataFrame", "sklearn.ensemble.GradientBoostingClassifier", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
AudioStreamTV/tensorflow
[ "7277ed8ed2da84b227295216632dec52a81f63b3", "7277ed8ed2da84b227295216632dec52a81f63b3", "7277ed8ed2da84b227295216632dec52a81f63b3" ]
[ "tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py", "tensorflow/contrib/autograph/converters/control_flow.py", "tensorflow/contrib/metrics/python/ops/metric_ops_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utility functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.contrib.distributions.python.ops import mixture\nfrom tensorflow.contrib.distributions.python.ops import mixture_same_family\nfrom tensorflow.contrib.distributions.python.ops import mvn_diag\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import categorical\nfrom tensorflow.python.ops.distributions import normal\nfrom tensorflow.python.ops.linalg import linear_operator_diag\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\ndef _powerset(x):\n s = list(x)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s) + 1))\n\n\ndef _matrix_diag(d):\n \"\"\"Batch version of np.diag.\"\"\"\n orig_shape = d.shape\n d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))\n diag_list = []\n for i in range(d.shape[0]):\n diag_list.append(np.diag(d[i, ...]))\n return np.reshape(diag_list, orig_shape + (d.shape[-1],))\n\n\ndef _make_tril_scale(\n loc=None,\n scale_tril=None,\n scale_diag=None,\n scale_identity_multiplier=None,\n shape_hint=None):\n if scale_tril is not None:\n scale_tril = np.tril(scale_tril)\n if scale_diag is not None:\n scale_tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))\n if scale_identity_multiplier is not None:\n scale_tril += (\n scale_identity_multiplier * _matrix_diag(np.ones(\n [scale_tril.shape[-1]], dtype=np.float32)))\n return scale_tril\n return _make_diag_scale(\n loc, scale_diag, scale_identity_multiplier, shape_hint)\n\n\ndef _make_diag_scale(\n loc=None,\n scale_diag=None,\n scale_identity_multiplier=None,\n shape_hint=None):\n if scale_diag is not None:\n scale_diag = np.asarray(scale_diag)\n if scale_identity_multiplier is not None:\n scale_diag += scale_identity_multiplier\n return _matrix_diag(scale_diag)\n\n if loc is None and shape_hint is None:\n return None\n\n if shape_hint is None:\n shape_hint = loc.shape[-1]\n if scale_identity_multiplier is None:\n scale_identity_multiplier = 1.\n return scale_identity_multiplier * np.diag(np.ones(shape_hint))\n\n\nclass MakeTrilScaleTest(test.TestCase):\n\n def _testLegalInputs(\n self, loc=None, shape_hint=None, scale_params=None):\n for args in _powerset(scale_params.items()):\n with self.cached_session():\n args = dict(args)\n\n 
scale_args = dict({\n \"loc\": loc,\n \"shape_hint\": shape_hint}, **args)\n expected_scale = _make_tril_scale(**scale_args)\n if expected_scale is None:\n # Not enough shape information was specified.\n with self.assertRaisesRegexp(ValueError, (\"is specified.\")):\n scale = distribution_util.make_tril_scale(**scale_args)\n scale.to_dense().eval()\n else:\n scale = distribution_util.make_tril_scale(**scale_args)\n self.assertAllClose(expected_scale, scale.to_dense().eval())\n\n def testLegalInputs(self):\n self._testLegalInputs(\n loc=np.array([-1., -1.], dtype=np.float32),\n shape_hint=2,\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [2., 3.],\n \"scale_tril\": [[1., 0.],\n [-3., 3.]],\n })\n\n def testLegalInputsMultidimensional(self):\n self._testLegalInputs(\n loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),\n shape_hint=3,\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [[[2., 3., 4.], [3., 4., 5.]]],\n \"scale_tril\": [[[[1., 0., 0.],\n [-3., 3., 0.],\n [1., -2., 1.]],\n [[2., 1., 0.],\n [-4., 7., 0.],\n [1., -1., 1.]]]]\n })\n\n def testZeroTriU(self):\n with self.cached_session():\n scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])\n self.assertAllClose([[1., 0], [1., 1.]], scale.to_dense().eval())\n\n def testValidateArgs(self):\n with self.cached_session():\n with self.assertRaisesOpError(\"diagonal part must be non-zero\"):\n scale = distribution_util.make_tril_scale(\n scale_tril=[[0., 1], [1., 1.]], validate_args=True)\n scale.to_dense().eval()\n\n def testAssertPositive(self):\n with self.cached_session():\n with self.assertRaisesOpError(\"diagonal part must be positive\"):\n scale = distribution_util.make_tril_scale(\n scale_tril=[[-1., 1], [1., 1.]],\n validate_args=True,\n assert_positive=True)\n scale.to_dense().eval()\n\n\nclass MakeDiagScaleTest(test.TestCase):\n\n def _testLegalInputs(\n self, loc=None, shape_hint=None, scale_params=None):\n for args in _powerset(scale_params.items()):\n with self.cached_session():\n args = dict(args)\n\n scale_args = dict({\n \"loc\": loc,\n \"shape_hint\": shape_hint}, **args)\n expected_scale = _make_diag_scale(**scale_args)\n if expected_scale is None:\n # Not enough shape information was specified.\n with self.assertRaisesRegexp(ValueError, (\"is specified.\")):\n scale = distribution_util.make_diag_scale(**scale_args)\n scale.to_dense().eval()\n else:\n scale = distribution_util.make_diag_scale(**scale_args)\n self.assertAllClose(expected_scale, scale.to_dense().eval())\n\n def testLegalInputs(self):\n self._testLegalInputs(\n loc=np.array([-1., -1.], dtype=np.float32),\n shape_hint=2,\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [2., 3.]\n })\n\n def testLegalInputsMultidimensional(self):\n self._testLegalInputs(\n loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),\n shape_hint=3,\n scale_params={\n \"scale_identity_multiplier\": 2.,\n \"scale_diag\": [[[2., 3., 4.], [3., 4., 5.]]]\n })\n\n def testValidateArgs(self):\n with self.cached_session():\n with self.assertRaisesOpError(\"diagonal part must be non-zero\"):\n scale = distribution_util.make_diag_scale(\n scale_diag=[[0., 1], [1., 1.]], validate_args=True)\n scale.to_dense().eval()\n\n def testAssertPositive(self):\n with self.cached_session():\n with self.assertRaisesOpError(\"diagonal part must be positive\"):\n scale = distribution_util.make_diag_scale(\n scale_diag=[[-1., 1], [1., 1.]],\n validate_args=True,\n assert_positive=True)\n 
scale.to_dense().eval()\n\n\nclass ShapesFromLocAndScaleTest(test.TestCase):\n\n def test_static_loc_static_scale_non_matching_event_size_raises(self):\n loc = constant_op.constant(np.zeros((2, 4)))\n scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))\n with self.assertRaisesRegexp(ValueError, \"could not be broadcast\"):\n distribution_util.shapes_from_loc_and_scale(loc, scale)\n\n def test_static_loc_static_scale(self):\n loc = constant_op.constant(np.zeros((2, 3)))\n scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, scale)\n\n self.assertEqual(tensor_shape.TensorShape([5, 2]), batch_shape)\n self.assertEqual(tensor_shape.TensorShape([3]), event_shape)\n\n def test_static_loc_dynamic_scale(self):\n loc = constant_op.constant(np.zeros((2, 3)))\n diag = array_ops.placeholder(dtypes.float64)\n scale = linear_operator_diag.LinearOperatorDiag(diag)\n with self.cached_session() as sess:\n batch_shape, event_shape = sess.run(\n distribution_util.shapes_from_loc_and_scale(loc, scale),\n feed_dict={diag: np.ones((5, 1, 3))})\n self.assertAllEqual([5, 2], batch_shape)\n self.assertAllEqual([3], event_shape)\n\n def test_dynamic_loc_static_scale(self):\n loc = array_ops.placeholder(dtypes.float64)\n diag = constant_op.constant(np.ones((5, 2, 3)))\n scale = linear_operator_diag.LinearOperatorDiag(diag)\n with self.cached_session():\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, scale)\n # batch_shape depends on both args, and so is dynamic. Since loc did not\n # have static shape, we inferred event shape entirely from scale, and this\n # is available statically.\n self.assertAllEqual(\n [5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))\n self.assertAllEqual([3], event_shape)\n\n def test_dynamic_loc_dynamic_scale(self):\n loc = array_ops.placeholder(dtypes.float64)\n diag = array_ops.placeholder(dtypes.float64)\n scale = linear_operator_diag.LinearOperatorDiag(diag)\n with self.cached_session() as sess:\n batch_shape, event_shape = sess.run(\n distribution_util.shapes_from_loc_and_scale(loc, scale),\n feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})\n self.assertAllEqual([5, 2], batch_shape)\n self.assertAllEqual([3], event_shape)\n\n def test_none_loc_static_scale(self):\n loc = None\n scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, scale)\n\n self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)\n self.assertEqual(tensor_shape.TensorShape([3]), event_shape)\n\n def test_none_loc_dynamic_scale(self):\n loc = None\n diag = array_ops.placeholder(dtypes.float64)\n scale = linear_operator_diag.LinearOperatorDiag(diag)\n with self.cached_session() as sess:\n batch_shape, event_shape = sess.run(\n distribution_util.shapes_from_loc_and_scale(loc, scale),\n feed_dict={diag: np.ones((5, 1, 3))})\n self.assertAllEqual([5, 1], batch_shape)\n self.assertAllEqual([3], event_shape)\n\n\nclass GetBroadcastShapeTest(test.TestCase):\n\n def test_all_static_shapes_work(self):\n x = array_ops.ones((2, 1, 3))\n y = array_ops.ones((1, 5, 3))\n z = array_ops.ones(())\n self.assertAllEqual([2, 5, 3],\n distribution_util.get_broadcast_shape(x, y, z))\n\n def test_with_some_dynamic_shapes_works(self):\n x = array_ops.ones((2, 1, 3))\n y = array_ops.placeholder(x.dtype)\n z = array_ops.ones(())\n with self.cached_session() as sess:\n bcast_shape 
= sess.run(\n distribution_util.get_broadcast_shape(x, y, z),\n feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)})\n self.assertAllEqual([2, 5, 3], bcast_shape)\n\n\nclass TridiagTest(test.TestCase):\n\n def testWorksCorrectlyNoBatches(self):\n with self.cached_session():\n self.assertAllEqual(\n [[4., 8., 0., 0.],\n [1., 5., 9., 0.],\n [0., 2., 6., 10.],\n [0., 0., 3, 7.]],\n distribution_util.tridiag(\n [1., 2., 3.],\n [4., 5., 6., 7.],\n [8., 9., 10.]).eval())\n\n def testWorksCorrectlyBatches(self):\n with self.cached_session():\n self.assertAllClose(\n [[[4., 8., 0., 0.],\n [1., 5., 9., 0.],\n [0., 2., 6., 10.],\n [0., 0., 3, 7.]],\n [[0.7, 0.1, 0.0, 0.0],\n [0.8, 0.6, 0.2, 0.0],\n [0.0, 0.9, 0.5, 0.3],\n [0.0, 0.0, 1.0, 0.4]]],\n distribution_util.tridiag(\n [[1., 2., 3.],\n [0.8, 0.9, 1.]],\n [[4., 5., 6., 7.],\n [0.7, 0.6, 0.5, 0.4]],\n [[8., 9., 10.],\n [0.1, 0.2, 0.3]]).eval(),\n rtol=1e-5, atol=0.)\n\n def testHandlesNone(self):\n with self.cached_session():\n self.assertAllClose(\n [[[4., 0., 0., 0.],\n [0., 5., 0., 0.],\n [0., 0., 6., 0.],\n [0., 0., 0, 7.]],\n [[0.7, 0.0, 0.0, 0.0],\n [0.0, 0.6, 0.0, 0.0],\n [0.0, 0.0, 0.5, 0.0],\n [0.0, 0.0, 0.0, 0.4]]],\n distribution_util.tridiag(\n diag=[[4., 5., 6., 7.],\n [0.7, 0.6, 0.5, 0.4]]).eval(),\n rtol=1e-5, atol=0.)\n\n\nclass MixtureStddevTest(test.TestCase):\n\n def test_mixture_dev(self):\n mixture_weights = np.array([\n [1.0/3, 1.0/3, 1.0/3],\n [0.750, 0.250, 0.000]\n ])\n component_means = np.array([\n [1.0, 1.0, 1.0],\n [-5, 0, 1.25]\n ])\n component_devs = np.array([\n [1.0, 1.0, 1.0],\n [0.01, 2.0, 0.1]\n ])\n\n # The first case should trivially have a standard deviation of 1.0 because\n # all components are identical and have that standard deviation.\n # The second case was computed by hand.\n expected_devs = np.array([\n 1.0,\n 2.3848637277\n ])\n\n weights_tf = array_ops.constant(mixture_weights)\n means_tf = array_ops.constant(component_means)\n sigmas_tf = array_ops.constant(component_devs)\n mix_dev = distribution_util.mixture_stddev(weights_tf,\n means_tf,\n sigmas_tf)\n\n with self.cached_session() as sess:\n actual_devs = sess.run(mix_dev)\n\n self.assertAllClose(actual_devs, expected_devs)\n\n\nclass PadMixtureDimensionsTest(test.TestCase):\n\n def test_pad_mixture_dimensions_mixture(self):\n with self.cached_session() as sess:\n gm = mixture.Mixture(\n cat=categorical.Categorical(probs=[[0.3, 0.7]]),\n components=[\n normal.Normal(loc=[-1.0], scale=[1.0]),\n normal.Normal(loc=[1.0], scale=[0.5])\n ])\n\n x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])\n x_pad = distribution_util.pad_mixture_dimensions(\n x, gm, gm.cat, gm.event_shape.ndims)\n x_out, x_pad_out = sess.run([x, x_pad])\n\n self.assertAllEqual(x_pad_out.shape, [2, 2])\n self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))\n\n def test_pad_mixture_dimensions_mixture_same_family(self):\n with self.cached_session() as sess:\n gm = mixture_same_family.MixtureSameFamily(\n mixture_distribution=categorical.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1.0, 0.5]))\n\n x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])\n x_pad = distribution_util.pad_mixture_dimensions(\n x, gm, gm.mixture_distribution, gm.event_shape.ndims)\n x_out, x_pad_out = sess.run([x, x_pad])\n\n self.assertAllEqual(x_pad_out.shape, [2, 2, 1])\n self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))\n\n\nclass _PadTest(object):\n\n def 
testNegAxisCorrectness(self):\n x_ = np.float32([[1., 2, 3],\n [4, 5, 6]])\n value_ = np.float32(0.25)\n count_ = np.int32(2)\n with self.cached_session() as sess:\n x = array_ops.placeholder_with_default(\n x_, shape=x_.shape if self.is_static_shape else None)\n value = (constant_op.constant(value_) if self.is_static_shape\n else array_ops.placeholder_with_default(value_, shape=None))\n count = (constant_op.constant(count_) if self.is_static_shape\n else array_ops.placeholder_with_default(count_, shape=None))\n\n x0_front = distribution_util.pad(\n x, axis=-2, value=value, count=count, front=True)\n x0_back = distribution_util.pad(\n x, axis=-2, count=count, back=True)\n x0_both = distribution_util.pad(\n x, axis=-2, value=value, front=True, back=True)\n\n if self.is_static_shape:\n self.assertAllEqual([4, 3], x0_front.shape)\n self.assertAllEqual([4, 3], x0_back.shape)\n self.assertAllEqual([4, 3], x0_both.shape)\n\n [x0_front_, x0_back_, x0_both_] = sess.run([\n x0_front, x0_back, x0_both])\n\n self.assertAllClose(\n np.float32([[value_]*3,\n [value_]*3,\n [1, 2, 3],\n [4, 5, 6]]),\n x0_front_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[1, 2, 3],\n [4, 5, 6],\n [0.]*3,\n [0.]*3]),\n x0_back_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[value_]*3,\n [1, 2, 3],\n [4, 5, 6],\n [value_]*3]),\n x0_both_, atol=0., rtol=1e-6)\n\n def testPosAxisCorrectness(self):\n x_ = np.float32([[1., 2, 3],\n [4, 5, 6]])\n value_ = np.float32(0.25)\n count_ = np.int32(2)\n with self.cached_session() as sess:\n x = array_ops.placeholder_with_default(\n x_, shape=x_.shape if self.is_static_shape else None)\n value = (constant_op.constant(value_) if self.is_static_shape\n else array_ops.placeholder_with_default(value_, shape=None))\n count = (constant_op.constant(count_) if self.is_static_shape\n else array_ops.placeholder_with_default(count_, shape=None))\n\n x1_front = distribution_util.pad(\n x, axis=1, value=value, count=count, front=True)\n x1_back = distribution_util.pad(\n x, axis=1, count=count, back=True)\n x1_both = distribution_util.pad(\n x, axis=1, value=value, front=True, back=True)\n\n if self.is_static_shape:\n self.assertAllEqual([2, 5], x1_front.shape)\n self.assertAllEqual([2, 5], x1_back.shape)\n self.assertAllEqual([2, 5], x1_both.shape)\n\n [x1_front_, x1_back_, x1_both_] = sess.run([\n x1_front, x1_back, x1_both])\n\n self.assertAllClose(\n np.float32([[value_]*2 + [1, 2, 3],\n [value_]*2 + [4, 5, 6]]),\n x1_front_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[1, 2, 3] + [0.]*2,\n [4, 5, 6] + [0.]*2]),\n x1_back_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.float32([[value_, 1, 2, 3, value_],\n [value_, 4, 5, 6, value_]]),\n x1_both_, atol=0., rtol=1e-6)\n\n\nclass PadStaticTest(_PadTest, test.TestCase):\n\n @property\n def is_static_shape(self):\n return True\n\n\nclass PadDynamicTest(_PadTest, test.TestCase):\n\n @property\n def is_static_shape(self):\n return False\n\n\nclass TestMoveDimension(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def test_move_dimension_static_shape(self):\n\n x = random_ops.random_normal(shape=[200, 30, 4, 1, 6])\n\n x_perm = distribution_util.move_dimension(x, 1, 1)\n self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 4, 1, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, 3)\n self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, -2)\n self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])\n\n x_perm = 
distribution_util.move_dimension(x, 4, 2)\n self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 6, 4, 1])\n\n @test_util.run_in_graph_and_eager_modes\n def test_move_dimension_dynamic_shape(self):\n\n x_ = random_ops.random_normal(shape=[200, 30, 4, 1, 6])\n x = array_ops.placeholder_with_default(input=x_, shape=None)\n\n x_perm = distribution_util.move_dimension(x, 1, 1)\n self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),\n [200, 30, 4, 1, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, 3)\n self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),\n [30, 4, 1, 200, 6])\n\n x_perm = distribution_util.move_dimension(x, 0, -2)\n self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),\n [30, 4, 1, 200, 6])\n\n x_perm = distribution_util.move_dimension(x, 4, 2)\n self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),\n [200, 30, 6, 4, 1])\n\n x_perm = distribution_util.move_dimension(x, -1, 2)\n self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),\n [200, 30, 6, 4, 1])\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Handles control flow statements: while, for, if.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gast\n\nfrom tensorflow.contrib.autograph.core import converter\nfrom tensorflow.contrib.autograph.pyct import anno\nfrom tensorflow.contrib.autograph.pyct import ast_util\nfrom tensorflow.contrib.autograph.pyct import parser\nfrom tensorflow.contrib.autograph.pyct import templates\nfrom tensorflow.contrib.autograph.pyct.static_analysis import annos\n\n\nclass SymbolNamer(object):\n \"\"\"Describes the interface for ControlFlowTransformer's namer.\"\"\"\n\n def new_symbol(self, name_root, reserved_locals):\n \"\"\"Generate a new unique symbol.\n\n Args:\n name_root: String, used as stem in the new name.\n reserved_locals: Set(string), additional local symbols that are reserved\n and which should not be used.\n Returns:\n String.\n \"\"\"\n raise NotImplementedError()\n\n\nclass ControlFlowTransformer(converter.Base):\n \"\"\"Transforms control flow structures like loops an conditionals.\"\"\"\n\n def _create_cond_branch(self, body_name, aliased_orig_names,\n aliased_new_names, body, returns):\n if aliased_orig_names:\n template = \"\"\"\n def body_name():\n aliased_new_names, = aliased_orig_names,\n body\n return (returns,)\n \"\"\"\n return templates.replace(\n template,\n body_name=body_name,\n body=body,\n aliased_orig_names=aliased_orig_names,\n aliased_new_names=aliased_new_names,\n returns=returns)\n else:\n template = \"\"\"\n def body_name():\n body\n return (returns,)\n \"\"\"\n return templates.replace(\n template, body_name=body_name, body=body, returns=returns)\n\n def _create_cond_expr(self, results, test, body_name, orelse_name):\n if results is not 
None:\n template = \"\"\"\n results = ag__.utils.run_cond(test, body_name, orelse_name)\n \"\"\"\n return templates.replace(\n template,\n test=test,\n results=results,\n body_name=body_name,\n orelse_name=orelse_name)\n else:\n template = \"\"\"\n ag__.utils.run_cond(test, body_name, orelse_name)\n \"\"\"\n return templates.replace(\n template, test=test, body_name=body_name, orelse_name=orelse_name)\n\n def _fmt_symbol_list(self, symbol_set):\n if not symbol_set:\n return 'no variables'\n return ', '.join(map(str, symbol_set))\n\n def _validate_no_live_vars_created(self, node):\n body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)\n live_vars_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)\n live_vars_created_in_body = live_vars_out & body_scope.created\n if live_vars_created_in_body:\n raise ValueError(\n 'The following variables are created inside the loop and used later:'\n '\\n%s\\n'\n 'Variables must be declared outside loops because loops may not'\n ' necessarily execute.' % self._fmt_symbol_list(\n live_vars_created_in_body))\n\n def visit_If(self, node):\n node = self.generic_visit(node)\n\n body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)\n orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)\n defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)\n live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)\n\n modified_in_cond = body_scope.modified | orelse_scope.modified\n returned_from_cond = set()\n for s in modified_in_cond:\n if s in live_out:\n returned_from_cond.add(s)\n elif s.is_composite():\n # Special treatment for compound objects: if any of their owner entities\n # are live, then they are outputs as well.\n if any(owner in live_out for owner in s.owner_set):\n returned_from_cond.add(s)\n\n need_alias_in_body = body_scope.modified & defined_in\n need_alias_in_orelse = orelse_scope.modified & defined_in\n\n created_in_body = body_scope.modified & returned_from_cond - defined_in\n created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in\n\n if created_in_body != created_in_orelse:\n raise ValueError(\n 'if statement may not initialize all variables: the true branch'\n ' creates %s, while the false branch creates %s. Make sure all'\n ' these variables are initialized either in both'\n ' branches or before the if statement.' 
%\n (self._fmt_symbol_list(created_in_body),\n self._fmt_symbol_list(created_in_orelse)))\n\n # Alias the closure variables inside the conditional functions, to allow\n # the functions access to the respective variables.\n # We will alias variables independently for body and orelse scope,\n # because different branches might write different variables.\n aliased_body_orig_names = tuple(need_alias_in_body)\n aliased_orelse_orig_names = tuple(need_alias_in_orelse)\n aliased_body_new_names = tuple(\n self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)\n for s in aliased_body_orig_names)\n aliased_orelse_new_names = tuple(\n self.ctx.namer.new_symbol(s.ssf(), orelse_scope.referenced)\n for s in aliased_orelse_orig_names)\n\n alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))\n alias_orelse_map = dict(\n zip(aliased_orelse_orig_names, aliased_orelse_new_names))\n\n node_body = ast_util.rename_symbols(node.body, alias_body_map)\n node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)\n\n returned_from_cond = tuple(returned_from_cond)\n if returned_from_cond:\n if len(returned_from_cond) == 1:\n # TODO(mdan): Move this quirk into the operator implementation.\n cond_results = returned_from_cond[0]\n else:\n cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)\n\n returned_from_body = tuple(\n alias_body_map[s] if s in need_alias_in_body else s\n for s in returned_from_cond)\n returned_from_orelse = tuple(\n alias_orelse_map[s] if s in need_alias_in_orelse else s\n for s in returned_from_cond)\n\n else:\n # When the cond would return no value, we leave the cond called without\n # results. That in turn should trigger the side effect guards. The\n # branch functions will return a dummy value that ensures cond\n # actually has some return value as well.\n cond_results = None\n # TODO(mdan): This doesn't belong here; it's specific to the operator.\n returned_from_body = templates.replace_as_expression('tf.constant(1)')\n returned_from_orelse = templates.replace_as_expression('tf.constant(1)')\n\n body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)\n orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)\n\n body_def = self._create_cond_branch(\n body_name,\n aliased_orig_names=aliased_body_orig_names,\n aliased_new_names=aliased_body_new_names,\n body=node_body,\n returns=returned_from_body)\n orelse_def = self._create_cond_branch(\n orelse_name,\n aliased_orig_names=aliased_orelse_orig_names,\n aliased_new_names=aliased_orelse_new_names,\n body=node_orelse,\n returns=returned_from_orelse)\n cond_expr = self._create_cond_expr(cond_results, node.test, body_name,\n orelse_name)\n\n return body_def + orelse_def + cond_expr\n\n def visit_While(self, node):\n self.generic_visit(node)\n\n self._validate_no_live_vars_created(node)\n\n body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)\n body_closure = body_scope.modified - body_scope.created\n all_referenced = body_scope.referenced\n\n cond_scope = anno.getanno(node, annos.NodeAnno.COND_SCOPE)\n cond_closure = set()\n for s in cond_scope.referenced:\n for root in s.support_set:\n if root not in body_scope.created:\n cond_closure.add(root)\n\n state = list(body_closure)\n if not state:\n # TODO(mdan): Implement this properly.\n # To complete this statement, we need to check whether any variable\n # created inside the body scope is used before being modified outside the\n # scope. 
This should be done during activity analysis, and in general\n # should cover the case where variables may not be initialized.\n raise ValueError('cannot convert while loop: no outputs')\n\n state_ssf = [\n self.ctx.namer.new_symbol(s.ssf(), all_referenced) for s in state\n ]\n ssf_map = {\n name: ssf\n for name, ssf in zip(state, state_ssf)\n if str(name) != ssf\n }\n\n if len(state) == 1:\n state = state[0]\n state_ssf = state_ssf[0]\n state_ast_tuple = state\n else:\n state_ast_tuple = gast.Tuple([n.ast() for n in state], None)\n\n node_body = ast_util.rename_symbols(node.body, ssf_map)\n test = ast_util.rename_symbols(node.test, ssf_map)\n\n template = \"\"\"\n def test_name(state_ssf):\n return test\n def body_name(state_ssf):\n body\n return state_ssf,\n state_ast_tuple = ag__.while_stmt(\n test_name, body_name, (state,), (extra_deps,))\n \"\"\"\n node = templates.replace(\n template,\n state=state,\n state_ssf=state_ssf,\n state_ast_tuple=state_ast_tuple,\n test_name=self.ctx.namer.new_symbol('loop_test', body_scope.referenced),\n test=test,\n body_name=self.ctx.namer.new_symbol('loop_body', body_scope.referenced),\n body=node_body,\n extra_deps=tuple(s.ast() for s in cond_closure),\n )\n\n return node\n\n def visit_For(self, node):\n self.generic_visit(node)\n\n self._validate_no_live_vars_created(node)\n\n body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)\n body_closure = body_scope.modified - body_scope.created\n all_referenced = body_scope.referenced\n\n state = list(body_closure)\n\n state_ssf = [\n self.ctx.namer.new_symbol(s.ssf(), all_referenced) for s in state\n ]\n ssf_map = {\n name: ssf\n for name, ssf in zip(state, state_ssf)\n if str(name) != ssf\n }\n\n if len(state) == 1:\n state = state[0]\n state_ssf = state_ssf[0]\n state_ast_tuple = state\n else:\n state_ast_tuple = gast.Tuple([n.ast() for n in state], None)\n\n node_body = ast_util.rename_symbols(node.body, ssf_map)\n if anno.hasanno(node, 'extra_test'):\n extra_test = anno.getanno(node, 'extra_test')\n extra_test = ast_util.rename_symbols(extra_test, ssf_map)\n else:\n extra_test = parser.parse_expression('True')\n\n template = \"\"\"\n def extra_test_name(state_ssf):\n return extra_test_expr\n def body_name(iterate, state_ssf):\n body\n return state_ssf,\n state_ast_tuple = ag__.for_stmt(\n iter_, extra_test_name, body_name, (state,))\n \"\"\"\n node = templates.replace(\n template,\n state=state,\n state_ssf=state_ssf,\n state_ast_tuple=state_ast_tuple,\n iter_=node.iter,\n iterate=node.target,\n extra_test_name=self.ctx.namer.new_symbol('extra_test', all_referenced),\n extra_test_expr=extra_test,\n body_name=self.ctx.namer.new_symbol('loop_body', all_referenced),\n body=node_body)\n\n return node\n\n\ndef transform(node, ctx):\n node = ControlFlowTransformer(ctx).visit(node)\n return node\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for metric_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensorflow.contrib import metrics as metrics_lib\nfrom tensorflow.contrib.metrics.python.ops import metric_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\nNAN = float('nan')\n\nmetrics = metrics_lib\n\n\ndef _enqueue_vector(sess, queue, values, shape=None):\n if not shape:\n shape = (1, len(values))\n dtype = queue.dtypes[0]\n sess.run(\n queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))\n\n\ndef _binary_2d_label_to_sparse_value(labels):\n \"\"\"Convert dense 2D binary indicator tensor to sparse tensor.\n\n Only 1 values in `labels` are included in result.\n\n Args:\n labels: Dense 2D binary indicator tensor.\n\n Returns:\n `SparseTensorValue` whose values are indices along the last dimension of\n `labels`.\n \"\"\"\n indices = []\n values = []\n batch = 0\n for row in labels:\n label = 0\n xi = 0\n for x in row:\n if x == 1:\n indices.append([batch, xi])\n values.append(label)\n xi += 1\n else:\n assert x == 0\n label += 1\n batch += 1\n shape = [len(labels), len(labels[0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))\n\n\ndef _binary_2d_label_to_sparse(labels):\n \"\"\"Convert dense 2D binary indicator tensor to sparse tensor.\n\n Only 1 values in `labels` are included in result.\n\n Args:\n labels: Dense 2D binary indicator tensor.\n\n Returns:\n `SparseTensor` whose values are indices along the last dimension of\n `labels`.\n \"\"\"\n return sparse_tensor.SparseTensor.from_value(\n _binary_2d_label_to_sparse_value(labels))\n\n\ndef _binary_3d_label_to_sparse_value(labels):\n \"\"\"Convert dense 3D binary indicator tensor to sparse tensor.\n\n Only 1 values in `labels` are included in result.\n\n Args:\n labels: Dense 2D binary indicator tensor.\n\n Returns:\n `SparseTensorValue` whose values are indices along the last dimension of\n `labels`.\n \"\"\"\n indices = []\n values = []\n for d0, labels_d0 in enumerate(labels):\n for d1, labels_d1 in enumerate(labels_d0):\n d2 = 0\n for class_id, label in enumerate(labels_d1):\n if label == 1:\n values.append(class_id)\n indices.append([d0, d1, 
d2])\n d2 += 1\n else:\n assert label == 0\n shape = [len(labels), len(labels[0]), len(labels[0][0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))\n\n\ndef _binary_3d_label_to_sparse(labels):\n \"\"\"Convert dense 3D binary indicator tensor to sparse tensor.\n\n Only 1 values in `labels` are included in result.\n\n Args:\n labels: Dense 2D binary indicator tensor.\n\n Returns:\n `SparseTensor` whose values are indices along the last dimension of\n `labels`.\n \"\"\"\n return sparse_tensor.SparseTensor.from_value(\n _binary_3d_label_to_sparse_value(labels))\n\n\ndef _assert_nan(test_case, actual):\n test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)\n\n\ndef _assert_metric_variables(test_case, expected):\n test_case.assertEquals(\n set(expected), set(v.name for v in variables.local_variables()))\n test_case.assertEquals(\n set(expected),\n set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))\n\n\nclass StreamingMeanTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean(array_ops.ones([4, 3]))\n _assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean(\n array_ops.ones([4, 3]), metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_mean(\n array_ops.ones([4, 3]), updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testBasic(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAlmostEqual(1.65, sess.run(mean), 5)\n\n def testUpdateOpsReturnsCurrentValue(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values)\n\n sess.run(variables.local_variables_initializer())\n\n self.assertAlmostEqual(0.5, sess.run(update_op), 5)\n self.assertAlmostEqual(1.475, sess.run(update_op), 5)\n self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)\n self.assertAlmostEqual(1.65, sess.run(update_op), 5)\n\n self.assertAlmostEqual(1.65, sess.run(mean), 5)\n\n def test1dWeightedValues(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n 
values = values_queue.dequeue()\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, weights_queue, [1])\n _enqueue_vector(sess, weights_queue, [0])\n _enqueue_vector(sess, weights_queue, [0])\n _enqueue_vector(sess, weights_queue, [1])\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values, weights)\n\n variables.local_variables_initializer().run()\n for _ in range(4):\n update_op.eval()\n self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)\n\n def test1dWeightedValues_placeholders(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))\n values = array_ops.placeholder(dtype=dtypes_lib.float32)\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1,))\n _enqueue_vector(sess, weights_queue, 1, shape=(1,))\n _enqueue_vector(sess, weights_queue, 0, shape=(1,))\n _enqueue_vector(sess, weights_queue, 0, shape=(1,))\n _enqueue_vector(sess, weights_queue, 1, shape=(1,))\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values, weights)\n\n variables.local_variables_initializer().run()\n for i in range(4):\n update_op.eval(feed_dict={values: feed_values[i]})\n self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)\n\n def test2dWeightedValues(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, weights_queue, [1, 1])\n _enqueue_vector(sess, weights_queue, [1, 0])\n _enqueue_vector(sess, weights_queue, [0, 1])\n _enqueue_vector(sess, weights_queue, [0, 0])\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values, weights)\n\n variables.local_variables_initializer().run()\n for _ in range(4):\n update_op.eval()\n self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)\n\n def test2dWeightedValues_placeholders(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))\n values = array_ops.placeholder(dtype=dtypes_lib.float32)\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(2,))\n _enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))\n _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))\n _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))\n _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean(values, weights)\n\n variables.local_variables_initializer().run()\n for i in range(4):\n update_op.eval(feed_dict={values: feed_values[i]})\n self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)\n\n\nclass StreamingMeanTensorTest(test.TestCase):\n\n def setUp(self):\n 
ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_tensor(array_ops.ones([4, 3]))\n _assert_metric_variables(self,\n ('mean/total_tensor:0', 'mean/count_tensor:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean_tensor(\n array_ops.ones([4, 3]), metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_mean_tensor(\n array_ops.ones([4, 3]), updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testBasic(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))\n\n def testMultiDimensional(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))\n _enqueue_vector(\n sess,\n values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],\n shape=(2, 2, 2))\n _enqueue_vector(\n sess,\n values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],\n shape=(2, 2, 2))\n values = values_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(2):\n sess.run(update_op)\n self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))\n\n def testUpdateOpsReturnsCurrentValue(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values)\n\n sess.run(variables.local_variables_initializer())\n\n self.assertAllClose([[0, 1]], sess.run(update_op), 5)\n self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)\n self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)\n self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)\n\n self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)\n\n def testWeighted1d(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weights.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, weights_queue, [[1]])\n _enqueue_vector(sess, weights_queue, [[0]])\n _enqueue_vector(sess, weights_queue, [[1]])\n _enqueue_vector(sess, weights_queue, [[0]])\n weights = 
weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values, weights)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)\n\n def testWeighted2d_1(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weights.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, weights_queue, [1, 1])\n _enqueue_vector(sess, weights_queue, [1, 0])\n _enqueue_vector(sess, weights_queue, [0, 1])\n _enqueue_vector(sess, weights_queue, [0, 0])\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values, weights)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)\n\n def testWeighted2d_2(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weights.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, weights_queue, [0, 1])\n _enqueue_vector(sess, weights_queue, [0, 0])\n _enqueue_vector(sess, weights_queue, [0, 1])\n _enqueue_vector(sess, weights_queue, [0, 0])\n weights = weights_queue.dequeue()\n\n mean, update_op = metrics.streaming_mean_tensor(values, weights)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAllClose([[0, 0.5]], sess.run(mean), 5)\n\n\nclass StreamingAccuracyTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_accuracy(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n name='my_accuracy')\n _assert_metric_variables(self,\n ('my_accuracy/count:0', 'my_accuracy/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_accuracy(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_accuracy(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):\n predictions = array_ops.ones((10, 3))\n labels = array_ops.ones((10, 4))\n with self.assertRaises(ValueError):\n metrics.streaming_accuracy(predictions, labels)\n\n def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):\n predictions = 
array_ops.ones((10, 3))\n labels = array_ops.ones((10, 3))\n weights = array_ops.ones((9, 3))\n with self.assertRaises(ValueError):\n metrics.streaming_accuracy(predictions, labels, weights)\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_accuracy = accuracy.eval()\n for _ in range(10):\n self.assertEqual(initial_accuracy, accuracy.eval())\n\n def testMultipleUpdates(self):\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [2])\n _enqueue_vector(sess, preds_queue, [1])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [2])\n labels = labels_queue.dequeue()\n\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n for _ in xrange(3):\n sess.run(update_op)\n self.assertEqual(0.5, sess.run(update_op))\n self.assertEqual(0.5, accuracy.eval())\n\n def testEffectivelyEquivalentSizes(self):\n predictions = array_ops.ones((40, 1))\n labels = array_ops.ones((40,))\n with self.test_session() as sess:\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1.0, update_op.eval())\n self.assertEqual(1.0, accuracy.eval())\n\n def testEffectivelyEquivalentSizesWithStaicShapedWeight(self):\n predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,\n labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),\n 1) # shape 3, 1\n weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),\n 1) # shape 3, 1\n\n with self.test_session() as sess:\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels,\n weights)\n\n sess.run(variables.local_variables_initializer())\n # if streaming_accuracy does not flatten the weight, accuracy would be\n # 0.33333334 due to an intended broadcast of weight. 
Due to flattening,\n # it will be higher than .95\n self.assertGreater(update_op.eval(), .95)\n self.assertGreater(accuracy.eval(), .95)\n\n def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):\n predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,\n labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),\n 1) # shape 3, 1\n\n weights = [[100], [1], [1]] # shape 3, 1\n weights_placeholder = array_ops.placeholder(\n dtype=dtypes_lib.int32, name='weights')\n feed_dict = {weights_placeholder: weights}\n\n with self.test_session() as sess:\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels,\n weights_placeholder)\n\n sess.run(variables.local_variables_initializer())\n # if streaming_accuracy does not flatten the weight, accuracy would be\n # 0.33333334 due to an intended broadcast of weight. Due to flattening,\n # it will be higher than .95\n self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)\n self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)\n\n def testMultipleUpdatesWithWeightedValues(self):\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [2])\n _enqueue_vector(sess, preds_queue, [1])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [2])\n labels = labels_queue.dequeue()\n\n # Create the queue that populates the weights.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.int64, shapes=(1, 1))\n _enqueue_vector(sess, weights_queue, [1])\n _enqueue_vector(sess, weights_queue, [1])\n _enqueue_vector(sess, weights_queue, [0])\n _enqueue_vector(sess, weights_queue, [0])\n weights = weights_queue.dequeue()\n\n accuracy, update_op = metrics.streaming_accuracy(predictions, labels,\n weights)\n\n sess.run(variables.local_variables_initializer())\n for _ in xrange(3):\n sess.run(update_op)\n self.assertEqual(1.0, sess.run(update_op))\n self.assertEqual(1.0, accuracy.eval())\n\n\nclass StreamingTruePositivesTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))\n _assert_metric_variables(self, ('true_positives/count:0',))\n\n def testUnweighted(self):\n for expand_predictions in [True, False]:\n for expand_labels in [True, False]:\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_predictions:\n predictions = array_ops.expand_dims(predictions, 2)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_labels:\n labels = array_ops.expand_dims(labels, 2)\n tp, tp_update_op = metrics.streaming_true_positives(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, tp.eval())\n self.assertEqual(1, tp_update_op.eval())\n self.assertEqual(1, tp.eval())\n\n def testWeighted(self):\n for 
dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n tp, tp_update_op = metrics.streaming_true_positives(\n predictions, labels, weights=37.0)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, tp.eval())\n self.assertEqual(37.0, tp_update_op.eval())\n self.assertEqual(37.0, tp.eval())\n\n\nclass StreamingFalseNegativesTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))\n _assert_metric_variables(self, ('false_negatives/count:0',))\n\n def testUnweighted(self):\n for expand_predictions in [True, False]:\n for expand_labels in [True, False]:\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_predictions:\n predictions = array_ops.expand_dims(predictions, 2)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_labels:\n labels = array_ops.expand_dims(labels, 2)\n fn, fn_update_op = metrics.streaming_false_negatives(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, fn.eval())\n self.assertEqual(2, fn_update_op.eval())\n self.assertEqual(2, fn.eval())\n\n def testWeighted(self):\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n fn, fn_update_op = metrics.streaming_false_negatives(\n predictions, labels, weights=((3.0,), (5.0,), (7.0,)))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, fn.eval())\n self.assertEqual(8.0, fn_update_op.eval())\n self.assertEqual(8.0, fn.eval())\n\n\nclass StreamingFalsePositivesTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))\n _assert_metric_variables(self, ('false_positives/count:0',))\n\n def testUnweighted(self):\n for expand_predictions in [True, False]:\n for expand_labels in [True, False]:\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_predictions:\n predictions = array_ops.expand_dims(predictions, 2)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_labels:\n labels = array_ops.expand_dims(labels, 2)\n fp, fp_update_op = metrics.streaming_false_positives(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, fp.eval())\n self.assertEqual(4, fp_update_op.eval())\n self.assertEqual(4, fp.eval())\n\n def testWeighted(self):\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n 
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n fp, fp_update_op = metrics.streaming_false_positives(\n predictions,\n labels,\n weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,\n 29.0, 31.0)))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, fp.eval())\n self.assertEqual(42.0, fp_update_op.eval())\n self.assertEqual(42.0, fp.eval())\n\n\nclass StreamingTrueNegativesTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))\n _assert_metric_variables(self, ('true_negatives/count:0',))\n\n def testUnweighted(self):\n for expand_predictions in [True, False]:\n for expand_labels in [True, False]:\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_predictions:\n predictions = array_ops.expand_dims(predictions, 2)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n if expand_labels:\n labels = array_ops.expand_dims(labels, 2)\n tn, tn_update_op = metrics.streaming_true_negatives(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, tn.eval())\n self.assertEqual(5, tn_update_op.eval())\n self.assertEqual(5, tn.eval())\n\n def testWeighted(self):\n for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions = math_ops.cast(\n constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),\n dtype=dtype)\n labels = math_ops.cast(\n constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),\n dtype=dtype)\n tn, tn_update_op = metrics.streaming_true_negatives(\n predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, tn.eval())\n self.assertEqual(15.0, tn_update_op.eval())\n self.assertEqual(15.0, tn.eval())\n\n\nclass StreamingTruePositivesAtThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_true_positives_at_thresholds(\n (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))\n _assert_metric_variables(self, ('true_positives:0',))\n\n def testUnweighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(\n predictions, labels, thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0, 0, 0), tp.eval())\n self.assertAllEqual((3, 1, 0), tp_update_op.eval())\n self.assertAllEqual((3, 1, 0), tp.eval())\n\n def testWeighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(\n predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))\n\n with 
self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())\n self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())\n self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())\n\n\nclass StreamingFalseNegativesAtThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_negatives_at_thresholds(\n (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(\n 0.15,\n 0.5,\n 0.85,\n ))\n _assert_metric_variables(self, ('false_negatives:0',))\n\n def testUnweighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(\n predictions, labels, thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0, 0, 0), fn.eval())\n self.assertAllEqual((0, 2, 3), fn_update_op.eval())\n self.assertAllEqual((0, 2, 3), fn.eval())\n\n def testWeighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(\n predictions,\n labels,\n weights=((3.0,), (5.0,), (7.0,)),\n thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())\n self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())\n self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())\n\n\nclass StreamingFalsePositivesAtThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_positives_at_thresholds(\n (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))\n _assert_metric_variables(self, ('false_positives:0',))\n\n def testUnweighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(\n predictions, labels, thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0, 0, 0), fp.eval())\n self.assertAllEqual((7, 4, 2), fp_update_op.eval())\n self.assertAllEqual((7, 4, 2), fp.eval())\n\n def testWeighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(\n predictions,\n labels,\n weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,\n 29.0, 31.0)),\n thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())\n self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())\n self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())\n\n\nclass StreamingTrueNegativesAtThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n 
metrics.streaming_true_negatives_at_thresholds(\n (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))\n _assert_metric_variables(self, ('true_negatives:0',))\n\n def testUnweighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(\n predictions, labels, thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0, 0, 0), tn.eval())\n self.assertAllEqual((2, 5, 7), tn_update_op.eval())\n self.assertAllEqual((2, 5, 7), tn.eval())\n\n def testWeighted(self):\n predictions = constant_op.constant(\n ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))\n labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))\n tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(\n predictions,\n labels,\n weights=((0.0, 2.0, 3.0, 5.0),),\n thresholds=(0.15, 0.5, 0.85))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())\n self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())\n self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())\n\n\nclass StreamingPrecisionTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_precision(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(self, ('precision/false_positives/count:0',\n 'precision/true_positives/count:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_precision(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_precision(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n precision, update_op = metrics.streaming_precision(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_precision = precision.eval()\n for _ in range(10):\n self.assertEqual(initial_precision, precision.eval())\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs)\n labels = constant_op.constant(inputs)\n precision, update_op = metrics.streaming_precision(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1, sess.run(update_op))\n self.assertAlmostEqual(1, precision.eval())\n\n def testSomeCorrect(self):\n predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n precision, update_op = 
metrics.streaming_precision(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, update_op.eval())\n self.assertAlmostEqual(0.5, precision.eval())\n\n def testWeighted1d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n precision, update_op = metrics.streaming_precision(\n predictions, labels, weights=constant_op.constant([[2], [5]]))\n\n with self.test_session():\n variables.local_variables_initializer().run()\n weighted_tp = 2.0 + 5.0\n weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)\n expected_precision = weighted_tp / weighted_positives\n self.assertAlmostEqual(expected_precision, update_op.eval())\n self.assertAlmostEqual(expected_precision, precision.eval())\n\n def testWeighted1d_placeholders(self):\n predictions = array_ops.placeholder(dtype=dtypes_lib.float32)\n labels = array_ops.placeholder(dtype=dtypes_lib.float32)\n feed_dict = {\n predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),\n labels: ((0, 1, 1, 0), (1, 0, 0, 1))\n }\n precision, update_op = metrics.streaming_precision(\n predictions, labels, weights=constant_op.constant([[2], [5]]))\n\n with self.test_session():\n variables.local_variables_initializer().run()\n weighted_tp = 2.0 + 5.0\n weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)\n expected_precision = weighted_tp / weighted_positives\n self.assertAlmostEqual(\n expected_precision, update_op.eval(feed_dict=feed_dict))\n self.assertAlmostEqual(\n expected_precision, precision.eval(feed_dict=feed_dict))\n\n def testWeighted2d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n precision, update_op = metrics.streaming_precision(\n predictions,\n labels,\n weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))\n\n with self.test_session():\n variables.local_variables_initializer().run()\n weighted_tp = 3.0 + 4.0\n weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)\n expected_precision = weighted_tp / weighted_positives\n self.assertAlmostEqual(expected_precision, update_op.eval())\n self.assertAlmostEqual(expected_precision, precision.eval())\n\n def testWeighted2d_placeholders(self):\n predictions = array_ops.placeholder(dtype=dtypes_lib.float32)\n labels = array_ops.placeholder(dtype=dtypes_lib.float32)\n feed_dict = {\n predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),\n labels: ((0, 1, 1, 0), (1, 0, 0, 1))\n }\n precision, update_op = metrics.streaming_precision(\n predictions,\n labels,\n weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))\n\n with self.test_session():\n variables.local_variables_initializer().run()\n weighted_tp = 3.0 + 4.0\n weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)\n expected_precision = weighted_tp / weighted_positives\n self.assertAlmostEqual(\n expected_precision, update_op.eval(feed_dict=feed_dict))\n self.assertAlmostEqual(\n expected_precision, precision.eval(feed_dict=feed_dict))\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs)\n labels = constant_op.constant(1 - inputs)\n precision, update_op = metrics.streaming_precision(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0, precision.eval())\n\n def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):\n predictions = constant_op.constant([0, 0, 
0, 0])\n labels = constant_op.constant([0, 0, 0, 0])\n precision, update_op = metrics.streaming_precision(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0.0, precision.eval())\n\n\nclass StreamingRecallTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_recall(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self,\n ('recall/false_negatives/count:0', 'recall/true_positives/count:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_recall(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_recall(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n recall, update_op = metrics.streaming_recall(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_recall = recall.eval()\n for _ in range(10):\n self.assertEqual(initial_recall, recall.eval())\n\n def testAllCorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(np_inputs)\n recall, update_op = metrics.streaming_recall(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(1, recall.eval())\n\n def testSomeCorrect(self):\n predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n recall, update_op = metrics.streaming_recall(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, update_op.eval())\n self.assertAlmostEqual(0.5, recall.eval())\n\n def testWeighted1d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[2], [5]])\n recall, update_op = metrics.streaming_recall(\n predictions, labels, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_tp = 2.0 + 5.0\n weighted_t = (2.0 + 2.0) + (5.0 + 5.0)\n expected_recall = weighted_tp / weighted_t\n self.assertAlmostEqual(expected_recall, update_op.eval())\n self.assertAlmostEqual(expected_recall, recall.eval())\n\n def testWeighted2d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])\n recall, update_op = metrics.streaming_recall(\n predictions, labels, 
weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_tp = 3.0 + 1.0\n weighted_t = (2.0 + 3.0) + (4.0 + 1.0)\n expected_recall = weighted_tp / weighted_t\n self.assertAlmostEqual(expected_recall, update_op.eval())\n self.assertAlmostEqual(expected_recall, recall.eval())\n\n def testAllIncorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(1 - np_inputs)\n recall, update_op = metrics.streaming_recall(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, recall.eval())\n\n def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):\n predictions = array_ops.zeros((1, 4))\n labels = array_ops.zeros((1, 4))\n recall, update_op = metrics.streaming_recall(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, recall.eval())\n\n\nclass StreamingFPRTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_positive_rate(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(self,\n ('false_positive_rate/false_positives/count:0',\n 'false_positive_rate/true_negatives/count:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_false_positive_rate(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_false_positive_rate(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_fpr = fpr.eval()\n for _ in range(10):\n self.assertEqual(initial_fpr, fpr.eval())\n\n def testAllCorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(np_inputs)\n fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, fpr.eval())\n\n def testSomeCorrect(self):\n predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, update_op.eval())\n self.assertAlmostEqual(0.5, fpr.eval())\n\n def 
testWeighted1d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[2], [5]])\n fpr, update_op = metrics.streaming_false_positive_rate(\n predictions, labels, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_fp = 2.0 + 5.0\n weighted_f = (2.0 + 2.0) + (5.0 + 5.0)\n expected_fpr = weighted_fp / weighted_f\n self.assertAlmostEqual(expected_fpr, update_op.eval())\n self.assertAlmostEqual(expected_fpr, fpr.eval())\n\n def testWeighted2d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])\n fpr, update_op = metrics.streaming_false_positive_rate(\n predictions, labels, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_fp = 1.0 + 3.0\n weighted_f = (1.0 + 4.0) + (2.0 + 3.0)\n expected_fpr = weighted_fp / weighted_f\n self.assertAlmostEqual(expected_fpr, update_op.eval())\n self.assertAlmostEqual(expected_fpr, fpr.eval())\n\n def testAllIncorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(1 - np_inputs)\n fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(1, fpr.eval())\n\n def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):\n predictions = array_ops.ones((1, 4))\n labels = array_ops.ones((1, 4))\n fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, fpr.eval())\n\n\nclass StreamingFNRTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_negative_rate(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(self,\n ('false_negative_rate/false_negatives/count:0',\n 'false_negative_rate/true_positives/count:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_false_negative_rate(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_false_negative_rate(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_fnr = fnr.eval()\n for _ in 
range(10):\n self.assertEqual(initial_fnr, fnr.eval())\n\n def testAllCorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(np_inputs)\n fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, fnr.eval())\n\n def testSomeCorrect(self):\n predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, update_op.eval())\n self.assertAlmostEqual(0.5, fnr.eval())\n\n def testWeighted1d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[2], [5]])\n fnr, update_op = metrics.streaming_false_negative_rate(\n predictions, labels, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_fn = 2.0 + 5.0\n weighted_t = (2.0 + 2.0) + (5.0 + 5.0)\n expected_fnr = weighted_fn / weighted_t\n self.assertAlmostEqual(expected_fnr, update_op.eval())\n self.assertAlmostEqual(expected_fnr, fnr.eval())\n\n def testWeighted2d(self):\n predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])\n labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])\n weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])\n fnr, update_op = metrics.streaming_false_negative_rate(\n predictions, labels, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n weighted_fn = 2.0 + 4.0\n weighted_t = (2.0 + 3.0) + (1.0 + 4.0)\n expected_fnr = weighted_fn / weighted_t\n self.assertAlmostEqual(expected_fnr, update_op.eval())\n self.assertAlmostEqual(expected_fnr, fnr.eval())\n\n def testAllIncorrect(self):\n np_inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(np_inputs)\n labels = constant_op.constant(1 - np_inputs)\n fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(1, fnr.eval())\n\n def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):\n predictions = array_ops.zeros((1, 4))\n labels = array_ops.zeros((1, 4))\n fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, fnr.eval())\n\n\nclass StreamingCurvePointsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metric_ops.streaming_curve_points(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self,\n ('curve_points/true_positives:0', 'curve_points/false_negatives:0',\n 'curve_points/false_positives:0', 'curve_points/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n points, _ = metric_ops.streaming_curve_points(\n labels=array_ops.ones((10, 1)),\n predictions=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n 
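# A minimal standalone NumPy sketch (illustrative only, not part of the test
# suite) of the weighted false-negative-rate arithmetic that
# StreamingFNRTest.testWeighted2d above asserts:
#   FNR = sum(w * false_negatives) / sum(w * positives).
# The helper name `np_weighted_fnr` is an assumption for this sketch.
import numpy as np


def np_weighted_fnr(predictions, labels, weights):
  predictions = np.asarray(predictions)
  labels = np.asarray(labels)
  weights = np.broadcast_to(np.asarray(weights, dtype=float), labels.shape)
  false_negatives = (predictions == 0) & (labels == 1)
  positives = labels == 1
  return weights[false_negatives].sum() / weights[positives].sum()


# Reproduces expected_fnr = (2 + 4) / ((2 + 3) + (1 + 4)) = 0.6 from above.
assert np.isclose(
    np_weighted_fnr([[1, 0, 1, 0], [0, 1, 0, 1]],
                    [[0, 1, 1, 0], [1, 0, 0, 1]],
                    [[1, 2, 3, 4], [4, 3, 2, 1]]), 0.6)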
self.assertListEqual(ops.get_collection(my_collection_name), [points])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metric_ops.streaming_curve_points(\n labels=array_ops.ones((10, 1)),\n predictions=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def _testValueTensorIsIdempotent(self, curve):\n predictions = constant_op.constant(\n np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)\n\n points, update_op = metric_ops.streaming_curve_points(\n labels, predictions=predictions, curve=curve)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n sess.run(update_op)\n initial_points = points.eval()\n\n sess.run(update_op)\n self.assertAllClose(initial_points, points.eval())\n\n def testValueTensorIsIdempotentROC(self):\n self._testValueTensorIsIdempotent(curve='ROC')\n\n def testValueTensorIsIdempotentPR(self):\n self._testValueTensorIsIdempotent(curve='PR')\n\n def _testCase(self, labels, predictions, curve, expected_points):\n with self.test_session() as sess:\n predictions_tensor = constant_op.constant(\n predictions, dtype=dtypes_lib.float32)\n labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)\n points, update_op = metric_ops.streaming_curve_points(\n labels=labels_tensor,\n predictions=predictions_tensor,\n num_thresholds=3,\n curve=curve)\n\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n\n self.assertAllClose(expected_points, points.eval())\n\n def testEdgeCasesROC(self):\n self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])\n self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])\n self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])\n self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])\n\n def testManyValuesROC(self):\n self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',\n [[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])\n\n def testEdgeCasesPR(self):\n self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])\n self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])\n self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])\n self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])\n\n def testManyValuesPR(self):\n self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',\n [[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])\n\n\ndef _np_auc(predictions, labels, weights=None):\n \"\"\"Computes the AUC explicitly using Numpy.\n\n Args:\n predictions: an ndarray with shape [N].\n labels: an ndarray with shape [N].\n weights: an ndarray with shape [N].\n\n Returns:\n the area under the ROC curve.\n \"\"\"\n if weights is None:\n weights = np.ones(np.size(predictions))\n is_positive = labels > 0\n num_positives = np.sum(weights[is_positive])\n num_negatives = np.sum(weights[~is_positive])\n\n # Sort descending:\n inds = np.argsort(-predictions)\n\n sorted_labels = labels[inds]\n sorted_weights = weights[inds]\n is_positive = sorted_labels > 0\n\n tp = np.cumsum(sorted_weights * is_positive) / num_positives\n return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives\n\n\nclass StreamingAUCTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_auc(\n 
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(self,\n ('auc/true_positives:0', 'auc/false_negatives:0',\n 'auc/false_positives:0', 'auc/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_auc(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_auc(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n auc, update_op = metrics.streaming_auc(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_auc = auc.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_auc, auc.eval(), 5)\n\n def testPredictionsOutOfRange(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n _, update_op = metrics.streaming_auc(predictions, labels)\n sess.run(variables.local_variables_initializer())\n self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)\n\n def testAllCorrect(self):\n self.allCorrectAsExpected('ROC')\n\n def allCorrectAsExpected(self, curve):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)\n\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, sess.run(update_op))\n\n self.assertEqual(1, auc.eval())\n\n def testSomeCorrect(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n auc, update_op = metrics.streaming_auc(predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, sess.run(update_op))\n\n self.assertAlmostEqual(0.5, auc.eval())\n\n def testWeighted1d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n weights = constant_op.constant([2], shape=(1, 1))\n auc, update_op = metrics.streaming_auc(\n predictions, labels, weights=weights)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.5, sess.run(update_op), 5)\n\n self.assertAlmostEqual(0.5, auc.eval(), 5)\n\n def testWeighted2d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))\n 
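# A minimal standalone NumPy sketch (illustrative only, not part of the test
# suite) of the trapezoidal ROC-AUC that streaming_auc approximates with a
# fixed grid of thresholds. The helper name `np_roc_auc` is an assumption.
import numpy as np


def np_roc_auc(predictions, labels):
  predictions = np.asarray(predictions, dtype=float).ravel()
  labels = np.asarray(labels).ravel()
  num_pos = max((labels == 1).sum(), 1)
  num_neg = max((labels == 0).sum(), 1)
  thresholds = np.concatenate(([np.inf], np.unique(predictions)[::-1], [-np.inf]))
  tpr = np.array([((predictions >= t) & (labels == 1)).sum() / num_pos for t in thresholds])
  fpr = np.array([((predictions >= t) & (labels == 0)).sum() / num_neg for t in thresholds])
  # Trapezoidal area under the (FPR, TPR) curve.
  return float(np.sum(np.diff(fpr) * (tpr[1:] + tpr[:-1]) / 2.0))


# Matches testSomeCorrect above: half of the flagged examples are true positives.
assert np.isclose(np_roc_auc([1, 0, 1, 0], [0, 1, 1, 0]), 0.5)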
auc, update_op = metrics.streaming_auc(\n predictions, labels, weights=weights)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.7, sess.run(update_op), 5)\n\n self.assertAlmostEqual(0.7, auc.eval(), 5)\n\n def testAUCPRSpecialCase(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))\n auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)\n\n self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)\n\n def testAnotherAUCPRSpecialCase(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],\n shape=(1, 7),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))\n auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)\n\n self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)\n\n def testThirdAUCPRSpecialCase(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],\n shape=(1, 7),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))\n auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)\n\n self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)\n auc, update_op = metrics.streaming_auc(predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0, sess.run(update_op))\n\n self.assertAlmostEqual(0, auc.eval())\n\n def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):\n with self.test_session() as sess:\n predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)\n labels = array_ops.zeros([4])\n auc, update_op = metrics.streaming_auc(predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1, sess.run(update_op), 6)\n\n self.assertAlmostEqual(1, auc.eval(), 6)\n\n def testRecallOneAndPrecisionOneGivesOnePRAUC(self):\n with self.test_session() as sess:\n predictions = array_ops.ones([4], dtype=dtypes_lib.float32)\n labels = array_ops.ones([4])\n auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1, sess.run(update_op), 6)\n\n self.assertAlmostEqual(1, auc.eval(), 6)\n\n def testWithMultipleUpdates(self):\n num_samples = 1000\n batch_size = 10\n num_batches = int(num_samples / batch_size)\n\n # Create the labels and data.\n labels = np.random.randint(0, 2, size=num_samples)\n noise = np.random.normal(0.0, scale=0.2, size=num_samples)\n predictions = 0.4 + 0.2 * labels + noise\n predictions[predictions > 1] = 1\n predictions[predictions < 0] = 0\n\n def _enqueue_as_batches(x, enqueue_ops):\n x_batches = x.astype(np.float32).reshape((num_batches, 
batch_size))\n x_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n for i in range(num_batches):\n enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))\n return x_queue.dequeue()\n\n for weights in (None, np.ones(num_samples),\n np.random.exponential(scale=1.0, size=num_samples)):\n expected_auc = _np_auc(predictions, labels, weights)\n\n with self.test_session() as sess:\n enqueue_ops = [[] for i in range(num_batches)]\n tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)\n tf_labels = _enqueue_as_batches(labels, enqueue_ops)\n tf_weights = (\n _enqueue_as_batches(weights, enqueue_ops)\n if weights is not None else None)\n\n for i in range(num_batches):\n sess.run(enqueue_ops[i])\n\n auc, update_op = metrics.streaming_auc(\n tf_predictions,\n tf_labels,\n curve='ROC',\n num_thresholds=500,\n weights=tf_weights)\n\n sess.run(variables.local_variables_initializer())\n for i in range(num_batches):\n sess.run(update_op)\n\n # Since this is only approximate, we can't expect a 6 digits match.\n # Although with higher number of samples/thresholds we should see the\n # accuracy improving\n self.assertAlmostEqual(expected_auc, auc.eval(), 2)\n\n\nclass StreamingDynamicAUCTest(test.TestCase):\n\n def setUp(self):\n super(StreamingDynamicAUCTest, self).setUp()\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testUnknownCurve(self):\n with self.assertRaisesRegexp(\n ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):\n metrics.streaming_dynamic_auc(\n labels=array_ops.ones((10, 1)),\n predictions=array_ops.ones((10, 1)),\n curve='TEST_CURVE')\n\n def testVars(self):\n metrics.streaming_dynamic_auc(\n labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))\n _assert_metric_variables(self, [\n 'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',\n 'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'\n ])\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n auc, _ = metrics.streaming_dynamic_auc(\n labels=array_ops.ones((10, 1)),\n predictions=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertEqual(ops.get_collection(my_collection_name), [auc])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_dynamic_auc(\n labels=array_ops.ones((10, 1)),\n predictions=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n # Run several updates.\n for _ in xrange(10):\n sess.run(update_op)\n # Then verify idempotency.\n initial_auc = auc.eval()\n for _ in xrange(10):\n self.assertAlmostEqual(initial_auc, auc.eval(), 5)\n\n def testAllLabelsOnes(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1., 1., 1.])\n labels = constant_op.constant([1, 1, 1])\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, auc.eval())\n\n def testAllLabelsZeros(self):\n with 
self.test_session() as sess:\n predictions = constant_op.constant([1., 1., 1.])\n labels = constant_op.constant([0, 0, 0])\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(0, auc.eval())\n\n def testNonZeroOnePredictions(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)\n labels = constant_op.constant([1, 0, 1, 0])\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(auc.eval(), 1.0)\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs)\n labels = constant_op.constant(inputs)\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertEqual(1, auc.eval())\n\n def testSomeCorrect(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0, 1, 0])\n labels = constant_op.constant([0, 1, 1, 0])\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0.5, auc.eval())\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)\n auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0, auc.eval())\n\n def testExceptionOnIncompatibleShapes(self):\n with self.test_session() as sess:\n predictions = array_ops.ones([5])\n labels = array_ops.zeros([6])\n with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):\n _, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n\n def testExceptionOnGreaterThanOneLabel(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)\n labels = constant_op.constant([2, 1, 0])\n _, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n with self.assertRaisesRegexp(\n errors_impl.InvalidArgumentError,\n '.*labels must be 0 or 1, at least one is >1.*'):\n sess.run(update_op)\n\n def testExceptionOnNegativeLabel(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)\n labels = constant_op.constant([1, 0, -1])\n _, update_op = metrics.streaming_dynamic_auc(labels, predictions)\n sess.run(variables.local_variables_initializer())\n with self.assertRaisesRegexp(\n errors_impl.InvalidArgumentError,\n '.*labels must be 0 or 1, at least one is <0.*'):\n sess.run(update_op)\n\n def testWithMultipleUpdates(self):\n batch_size = 10\n num_batches = 100\n labels = np.array([])\n predictions = np.array([])\n tf_labels = variables.Variable(\n array_ops.ones(batch_size, dtypes_lib.int32),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.int32)\n tf_predictions = variables.Variable(\n array_ops.ones(batch_size),\n 
collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.float32)\n auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n for _ in xrange(num_batches):\n new_labels = np.random.randint(0, 2, size=batch_size)\n noise = np.random.normal(0.0, scale=0.2, size=batch_size)\n new_predictions = 0.4 + 0.2 * new_labels + noise\n labels = np.concatenate([labels, new_labels])\n predictions = np.concatenate([predictions, new_predictions])\n sess.run(tf_labels.assign(new_labels))\n sess.run(tf_predictions.assign(new_predictions))\n sess.run(update_op)\n expected_auc = _np_auc(predictions, labels)\n self.assertAlmostEqual(expected_auc, auc.eval())\n\n def testAUCPRReverseIncreasingPredictions(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 1, 1])\n auc, update_op = metrics.streaming_dynamic_auc(\n labels, predictions, curve='PR')\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)\n\n def testAUCPRJumbledPredictions(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])\n auc, update_op = metrics.streaming_dynamic_auc(\n labels, predictions, curve='PR')\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)\n\n def testAUCPRPredictionsLessThanHalf(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],\n shape=(1, 7),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))\n auc, update_op = metrics.streaming_dynamic_auc(\n labels, predictions, curve='PR')\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)\n\n def testWithWeights(self):\n batch_size = 10\n num_batches = 100\n labels = np.array([])\n predictions = np.array([])\n weights = np.array([])\n tf_labels = variables.Variable(\n array_ops.ones(batch_size, dtypes_lib.int32),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.int32)\n tf_predictions = variables.Variable(\n array_ops.ones(batch_size),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.float32)\n tf_weights = variables.Variable(\n array_ops.ones(batch_size),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.float32)\n auc, update_op = metrics.streaming_dynamic_auc(tf_labels,\n tf_predictions,\n weights=tf_weights)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n for _ in xrange(num_batches):\n new_labels = np.random.randint(0, 2, size=batch_size)\n noise = np.random.uniform(-0.2, 0.2, size=batch_size)\n new_predictions = 0.4 + 0.2 * new_labels + noise\n new_weights = np.random.uniform(0.0, 3.0, size=batch_size)\n labels = np.concatenate([labels, new_labels])\n predictions = np.concatenate([predictions, new_predictions])\n weights = np.concatenate([weights, new_weights])\n sess.run([tf_labels.assign(new_labels),\n tf_predictions.assign(new_predictions),\n tf_weights.assign(new_weights)])\n sess.run(update_op)\n expected_auc = _np_auc(predictions, labels, weights)\n 
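# A short numeric check (illustrative only) of the weighted rank-statistic AUC
# computed by the `_np_auc` helper defined earlier in this file: each negative
# contributes its weight times the weighted fraction of positive weight ranked
# above it, normalised by the total negative weight. The `demo_*` names are
# assumptions for this sketch.
demo_preds = np.array([0.9, 0.8, 0.4, 0.2])
demo_labels = np.array([1, 0, 1, 0])
demo_weights = np.array([1.0, 2.0, 1.0, 2.0])
# The negative at 0.8 sees half of the positive weight above it; the negative
# at 0.2 sees all of it, so AUC = (2 * 0.5 + 2 * 1.0) / (2 + 2) = 0.75.
assert np.isclose(_np_auc(demo_preds, demo_labels, demo_weights), 0.75)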
self.assertAlmostEqual(expected_auc, auc.eval())\n\n\nclass AucWithConfidenceIntervalsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def _testResultsEqual(self, expected_dict, gotten_result):\n \"\"\"Tests that 2 results (dicts) represent the same data.\n\n Args:\n expected_dict: A dictionary with keys that are the names of properties\n of PrecisionRecallData and whose values are lists of floats.\n gotten_result: A AucWithConfidenceIntervalData object.\n \"\"\"\n gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}\n self.assertItemsEqual(\n list(expected_dict.keys()), list(gotten_dict.keys()))\n\n for key, expected_values in expected_dict.items():\n self.assertAllClose(expected_values, gotten_dict[key])\n\n def _testCase(self, predictions, labels, expected_result, weights=None):\n \"\"\"Performs a test given a certain scenario of labels, predictions, weights.\n\n Args:\n predictions: The predictions tensor. Of type float32.\n labels: The labels tensor. Of type bool.\n expected_result: The expected result (dict) that maps to tensors.\n weights: Optional weights tensor.\n \"\"\"\n with self.test_session() as sess:\n predictions_tensor = constant_op.constant(\n predictions, dtype=dtypes_lib.float32)\n labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.int64)\n weights_tensor = None\n if weights:\n weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)\n gotten_result, update_op = (\n metric_ops.auc_with_confidence_intervals(\n labels=labels_tensor,\n predictions=predictions_tensor,\n weights=weights_tensor))\n\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n\n self._testResultsEqual(expected_result, gotten_result)\n\n def testAucAllCorrect(self):\n self._testCase(\n predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],\n labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],\n expected_result={\n 'auc': 0.66666667,\n 'lower': 0.27826795,\n 'upper': 0.91208512,\n })\n\n def testAucUnorderedInput(self):\n self._testCase(\n predictions=[1.0, 0.6, 0., 0.3, 0.4, 0.2, 0.5, 0.3, 0.6, 0.8],\n labels=[0, 1, 0, 1, 0, 0, 1, 0, 0, 1],\n expected_result={\n 'auc': 0.66666667,\n 'lower': 0.27826795,\n 'upper': 0.91208512,\n })\n\n def testAucWithWeights(self):\n self._testCase(\n predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],\n labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],\n weights=[0.5, 0.6, 1.2, 1.5, 2.0, 2.0, 1.5, 1.2, 0.6, 0.5],\n expected_result={\n 'auc': 0.65151515,\n 'lower': 0.28918604,\n 'upper': 0.89573906,\n })\n\n def testAucEqualOne(self):\n self._testCase(\n predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],\n labels=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n expected_result={\n 'auc': 1.0,\n 'lower': 1.0,\n 'upper': 1.0,\n })\n\n def testAucEqualZero(self):\n self._testCase(\n predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],\n labels=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n expected_result={\n 'auc': 0.0,\n 'lower': 0.0,\n 'upper': 0.0,\n })\n\n def testNonZeroOnePredictions(self):\n self._testCase(\n predictions=[2.5, -2.5, .5, -.5, 1],\n labels=[1, 0, 1, 0, 0],\n expected_result={\n 'auc': 0.83333333,\n 'lower': 0.15229267,\n 'upper': 0.99286517,\n })\n\n def testAllLabelsOnes(self):\n self._testCase(\n predictions=[1., 1., 1., 1., 1.],\n labels=[1, 1, 1, 1, 1],\n expected_result={\n 'auc': 0.,\n 'lower': 0.,\n 'upper': 0.,\n })\n\n def testAllLabelsZeros(self):\n self._testCase(\n predictions=[0., 0., 0., 0., 0.],\n labels=[0, 0, 0, 0, 0],\n 
expected_result={\n 'auc': 0.,\n 'lower': 0.,\n 'upper': 0.,\n })\n\n def testWeightSumLessThanOneAll(self):\n self._testCase(\n predictions=[1., 1., 0., 1., 0., 0.],\n labels=[1, 1, 1, 0, 0, 0],\n weights=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],\n expected_result={\n 'auc': 0.,\n 'lower': 0.,\n 'upper': 0.,\n })\n\n def testWithMultipleUpdates(self):\n batch_size = 50\n num_batches = 100\n labels = np.array([])\n predictions = np.array([])\n tf_labels = variables.Variable(array_ops.ones(batch_size, dtypes_lib.int32),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.int32)\n tf_predictions = variables.Variable(\n array_ops.ones(batch_size),\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n dtype=dtypes_lib.float32)\n auc, update_op = metrics.auc_with_confidence_intervals(tf_labels,\n tf_predictions)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n for _ in xrange(num_batches):\n new_labels = np.random.randint(0, 2, size=batch_size)\n noise = np.random.normal(0.0, scale=0.2, size=batch_size)\n new_predictions = 0.4 + 0.2 * new_labels + noise\n labels = np.concatenate([labels, new_labels])\n predictions = np.concatenate([predictions, new_predictions])\n sess.run(tf_labels.assign(new_labels))\n sess.run(tf_predictions.assign(new_predictions))\n sess.run(update_op)\n expected_auc = _np_auc(predictions, labels)\n self.assertAllClose(expected_auc, auc.auc.eval())\n\n def testExceptionOnFloatLabels(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)\n labels = constant_op.constant([0.7, 0, 1, 0, 1])\n _, update_op = metrics.auc_with_confidence_intervals(labels, predictions)\n sess.run(variables.local_variables_initializer())\n self.assertRaises(TypeError, sess.run(update_op))\n\n def testExceptionOnGreaterThanOneLabel(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)\n labels = constant_op.constant([2, 1, 0, 1, 0])\n _, update_op = metrics.auc_with_confidence_intervals(labels, predictions)\n sess.run(variables.local_variables_initializer())\n with self.assertRaisesRegexp(\n errors_impl.InvalidArgumentError,\n '.*labels must be 0 or 1, at least one is >1.*'):\n sess.run(update_op)\n\n def testExceptionOnNegativeLabel(self):\n with self.test_session() as sess:\n predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)\n labels = constant_op.constant([1, 0, -1, 1, 0])\n _, update_op = metrics.auc_with_confidence_intervals(labels, predictions)\n sess.run(variables.local_variables_initializer())\n with self.assertRaisesRegexp(\n errors_impl.InvalidArgumentError,\n '.*labels must be 0 or 1, at least one is <0.*'):\n sess.run(update_op)\n\n\nclass StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def _testResultsEqual(self, expected_dict, gotten_result, eps=None):\n \"\"\"Tests that 2 results (dicts) represent the same data.\n\n Args:\n expected_dict: A dictionary with keys that are the names of properties\n of PrecisionRecallData and whose values are lists of floats.\n gotten_result: A PrecisionRecallData object.\n eps: Epsilon value to use for testing output values. 
If unspecified, use\n default from assertAllClose.\n \"\"\"\n gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}\n self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))\n\n for key, expected_values in expected_dict.items():\n if eps is not None:\n self.assertAllClose(expected_values, gotten_dict[key], atol=eps)\n else:\n self.assertAllClose(expected_values, gotten_dict[key])\n\n def testVars(self):\n metric_ops.precision_recall_at_equal_thresholds(\n labels=constant_op.constant([True], dtype=dtypes_lib.bool),\n predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))\n _assert_metric_variables(\n self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',\n 'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))\n\n def testVarsWithName(self):\n metric_ops.precision_recall_at_equal_thresholds(\n labels=constant_op.constant([True], dtype=dtypes_lib.bool),\n predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),\n name='foo')\n _assert_metric_variables(\n self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))\n\n def testValuesAreIdempotent(self):\n predictions = constant_op.constant(\n np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)\n\n result, update_op = metric_ops.precision_recall_at_equal_thresholds(\n labels=labels, predictions=predictions)\n\n with self.test_session() as sess:\n # Run several updates.\n sess.run(variables.local_variables_initializer())\n for _ in range(3):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_result = {\n k: value.eval().tolist()\n for k, value in result._asdict().items()\n }\n for _ in range(3):\n self._testResultsEqual(initial_result, result)\n\n def _testCase(self,\n predictions,\n labels,\n expected_result,\n dtype=dtypes_lib.float32,\n eps=None,\n weights=None):\n \"\"\"Performs a test given a certain scenario of labels, predictions, weights.\n\n Args:\n predictions: The predictions tensor. Of type dtype.\n labels: The labels tensor. Of type bool.\n expected_result: The expected result (dict) that maps to tensors.\n dtype: Data type to use for predictions and weights tensor. Default\n is float32.\n eps: Epsilon value to use for testing output values. 
If unspecified, use\n default from assertAllClose.\n weights: Optional weights tensor.\n \"\"\"\n with self.test_session() as sess:\n predictions_tensor = constant_op.constant(predictions, dtype=dtype)\n labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)\n weights_tensor = None\n if weights:\n weights_tensor = constant_op.constant(weights, dtype=dtype)\n gotten_result, update_op = (\n metric_ops.precision_recall_at_equal_thresholds(\n labels=labels_tensor,\n predictions=predictions_tensor,\n weights=weights_tensor,\n num_thresholds=3))\n self.assertEqual(gotten_result.tp.dtype, dtype)\n self.assertEqual(gotten_result.fp.dtype, dtype)\n self.assertEqual(gotten_result.tn.dtype, dtype)\n self.assertEqual(gotten_result.fn.dtype, dtype)\n self.assertEqual(gotten_result.precision.dtype, dtype)\n self.assertEqual(gotten_result.recall.dtype, dtype)\n self.assertEqual(gotten_result.thresholds.dtype, dtype)\n\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n\n self._testResultsEqual(expected_result, gotten_result, eps=eps)\n\n def testAllTruePositives(self):\n self._testCase(\n [[1]], [[True]], {\n 'tp': [1, 1, 1],\n 'fp': [0, 0, 0],\n 'tn': [0, 0, 0],\n 'fn': [0, 0, 0],\n 'precision': [1.0, 1.0, 1.0],\n 'recall': [1.0, 1.0, 1.0],\n 'thresholds': [0.0, 0.5, 1.0],\n })\n\n def testAllTrueNegatives(self):\n self._testCase(\n [[0]], [[False]], {\n 'tp': [0, 0, 0],\n 'fp': [1, 0, 0],\n 'tn': [0, 1, 1],\n 'fn': [0, 0, 0],\n 'precision': [0.0, 0.0, 0.0],\n 'recall': [0.0, 0.0, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n })\n\n def testAllFalsePositives(self):\n self._testCase(\n [[1]], [[False]], {\n 'tp': [0, 0, 0],\n 'fp': [1, 1, 1],\n 'tn': [0, 0, 0],\n 'fn': [0, 0, 0],\n 'precision': [0.0, 0.0, 0.0],\n 'recall': [0.0, 0.0, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n })\n\n def testAllFalseNegatives(self):\n self._testCase(\n [[0]], [[True]], {\n 'tp': [1, 0, 0],\n 'fp': [0, 0, 0],\n 'tn': [0, 0, 0],\n 'fn': [0, 1, 1],\n 'precision': [1.0, 0.0, 0.0],\n 'recall': [1.0, 0.0, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n })\n\n def testManyValues(self):\n self._testCase(\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],\n [[True, False, False, True, True, True]], {\n 'tp': [4, 3, 0],\n 'fp': [2, 0, 0],\n 'tn': [0, 2, 2],\n 'fn': [0, 1, 4],\n 'precision': [2.0 / 3.0, 1.0, 0.0],\n 'recall': [1.0, 0.75, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n })\n\n def testManyValuesWithWeights(self):\n self._testCase(\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],\n [[True, False, False, True, True, True]], {\n 'tp': [1.5, 1.5, 0.0],\n 'fp': [2.5, 0.0, 0.0],\n 'tn': [0.0, 2.5, 2.5],\n 'fn': [0.0, 0.0, 1.5],\n 'precision': [0.375, 1.0, 0.0],\n 'recall': [1.0, 1.0, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n },\n weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])\n\n def testFloat64(self):\n self._testCase(\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],\n [[True, False, False, True, True, True]], {\n 'tp': [4, 3, 0],\n 'fp': [2, 0, 0],\n 'tn': [0, 2, 2],\n 'fn': [0, 1, 4],\n 'precision': [2.0 / 3.0, 1.0, 0.0],\n 'recall': [1.0, 0.75, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n },\n dtype=dtypes_lib.float64)\n\n def testFloat16(self):\n self._testCase(\n [[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],\n [[True, False, False, True, True, True]], {\n 'tp': [4, 3, 0],\n 'fp': [2, 0, 0],\n 'tn': [0, 2, 2],\n 'fn': [0, 1, 4],\n 'precision': [2.0 / 3.0, 1.0, 0.0],\n 'recall': [1.0, 0.75, 0.0],\n 'thresholds': [0.0, 0.5, 1.0],\n },\n dtype=dtypes_lib.float16,\n eps=1e-3)\n\n\nclass StreamingSpecificityAtSensitivityTest(test.TestCase):\n\n def setUp(self):\n 
np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_specificity_at_sensitivity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n sensitivity=0.7)\n _assert_metric_variables(self,\n ('specificity_at_sensitivity/true_positives:0',\n 'specificity_at_sensitivity/false_negatives:0',\n 'specificity_at_sensitivity/false_positives:0',\n 'specificity_at_sensitivity/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_specificity_at_sensitivity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n sensitivity=0.7,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n sensitivity=0.7,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, sensitivity=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_specificity = specificity.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, sensitivity=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, sess.run(update_op))\n self.assertEqual(1, specificity.eval())\n\n def testSomeCorrectHighSensitivity(self):\n predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, sensitivity=0.8)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1.0, sess.run(update_op))\n self.assertAlmostEqual(1.0, specificity.eval())\n\n def testSomeCorrectLowSensitivity(self):\n predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, sensitivity=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n self.assertAlmostEqual(0.6, sess.run(update_op))\n self.assertAlmostEqual(0.6, specificity.eval())\n\n def testWeighted1d(self):\n predictions_values 
= [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n weights_values = [3]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n weights = constant_op.constant(weights_values)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, weights=weights, sensitivity=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n self.assertAlmostEqual(0.6, sess.run(update_op))\n self.assertAlmostEqual(0.6, specificity.eval())\n\n def testWeighted2d(self):\n predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n weights = constant_op.constant(weights_values)\n specificity, update_op = metrics.streaming_specificity_at_sensitivity(\n predictions, labels, weights=weights, sensitivity=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))\n self.assertAlmostEqual(8.0 / 15.0, specificity.eval())\n\n\nclass StreamingSensitivityAtSpecificityTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_sensitivity_at_specificity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n specificity=0.7)\n _assert_metric_variables(self,\n ('sensitivity_at_specificity/true_positives:0',\n 'sensitivity_at_specificity/false_negatives:0',\n 'sensitivity_at_specificity/false_positives:0',\n 'sensitivity_at_specificity/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_sensitivity_at_specificity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n specificity=0.7,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n specificity=0.7,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions, labels, specificity=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_sensitivity = sensitivity.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n specificity, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions, labels, 
specificity=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, sess.run(update_op))\n self.assertEqual(1, specificity.eval())\n\n def testSomeCorrectHighSpecificity(self):\n predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n specificity, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions, labels, specificity=0.8)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.8, sess.run(update_op))\n self.assertAlmostEqual(0.8, specificity.eval())\n\n def testSomeCorrectLowSpecificity(self):\n predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n specificity, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions, labels, specificity=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.6, sess.run(update_op))\n self.assertAlmostEqual(0.6, specificity.eval())\n\n def testWeighted(self):\n predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n weights = constant_op.constant(weights_values)\n specificity, update_op = metrics.streaming_sensitivity_at_specificity(\n predictions, labels, weights=weights, specificity=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.675, sess.run(update_op))\n self.assertAlmostEqual(0.675, specificity.eval())\n\n\n# TODO(nsilberman): Break this up into two sets of tests.\nclass StreamingPrecisionRecallThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_precision_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0])\n _assert_metric_variables(self, (\n 'precision_at_thresholds/true_positives:0',\n 'precision_at_thresholds/false_positives:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n prec, _ = metrics.streaming_precision_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n metrics_collections=[my_collection_name])\n rec, _ = metrics.streaming_recall_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, precision_op = metrics.streaming_precision_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n updates_collections=[my_collection_name])\n _, recall_op = metrics.streaming_recall_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n 
labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n updates_collections=[my_collection_name])\n self.assertListEqual(\n ops.get_collection(my_collection_name), [precision_op, recall_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n thresholds = [0, 0.5, 1.0]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run([prec_op, rec_op])\n\n # Then verify idempotency.\n initial_prec = prec.eval()\n initial_rec = rec.eval()\n for _ in range(10):\n self.assertAllClose(initial_prec, prec.eval())\n self.assertAllClose(initial_rec, rec.eval())\n\n # TODO(nsilberman): fix tests (passing but incorrect).\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n thresholds = [0.5]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertEqual(1, prec.eval())\n self.assertEqual(1, rec.eval())\n\n def testSomeCorrect(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n thresholds = [0.5]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(0.5, prec.eval())\n self.assertAlmostEqual(0.5, rec.eval())\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)\n thresholds = [0.5]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(0, prec.eval())\n self.assertAlmostEqual(0, rec.eval())\n\n def testWeights1d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = constant_op.constant(\n [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n prec_low = prec[0]\n prec_high = prec[1]\n rec_low = rec[0]\n rec_high = rec[1]\n\n 
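# A minimal standalone NumPy sketch (illustrative only, not part of the test
# suite) of weighted precision/recall at a single threshold, the arithmetic
# exercised by testWeights1d. The helper name `np_prec_rec_at_threshold` and
# the epsilon guard are assumptions for this sketch.
import numpy as np


def np_prec_rec_at_threshold(predictions, labels, weights, threshold, eps=1e-7):
  predictions = np.asarray(predictions, dtype=float)
  labels = np.asarray(labels)
  weights = np.broadcast_to(np.asarray(weights, dtype=float), labels.shape)
  predicted_pos = predictions > threshold
  tp = weights[predicted_pos & (labels == 1)].sum()
  fp = weights[predicted_pos & (labels == 0)].sum()
  fn = weights[~predicted_pos & (labels == 1)].sum()
  return tp / (tp + fp + eps), tp / (tp + fn + eps)


# Row 0 is weighted out entirely, so only the second row counts at the 0.5
# threshold, giving precision = recall = 1; at 1.1 nothing is flagged.
demo_prec, demo_rec = np_prec_rec_at_threshold(
    [[1, 0], [1, 0]], [[0, 1], [1, 0]], [[0], [1]], threshold=0.5)
assert np.isclose(demo_prec, 1.0, atol=1e-5) and np.isclose(demo_rec, 1.0, atol=1e-5)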
sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(1.0, prec_low.eval(), places=5)\n self.assertAlmostEqual(0.0, prec_high.eval(), places=5)\n self.assertAlmostEqual(1.0, rec_low.eval(), places=5)\n self.assertAlmostEqual(0.0, rec_high.eval(), places=5)\n\n def testWeights2d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = constant_op.constant(\n [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n prec_low = prec[0]\n prec_high = prec[1]\n rec_low = rec[0]\n rec_high = rec[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(1.0, prec_low.eval(), places=5)\n self.assertAlmostEqual(0.0, prec_high.eval(), places=5)\n self.assertAlmostEqual(1.0, rec_low.eval(), places=5)\n self.assertAlmostEqual(0.0, rec_high.eval(), places=5)\n\n def testExtremeThresholds(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))\n thresholds = [-1.0, 2.0] # lower/higher than any values\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n prec_low = prec[0]\n prec_high = prec[1]\n rec_low = rec[0]\n rec_high = rec[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(0.75, prec_low.eval())\n self.assertAlmostEqual(0.0, prec_high.eval())\n self.assertAlmostEqual(1.0, rec_low.eval())\n self.assertAlmostEqual(0.0, rec_high.eval())\n\n def testZeroLabelsPredictions(self):\n with self.test_session() as sess:\n predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)\n labels = array_ops.zeros([4])\n thresholds = [0.5]\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n predictions, labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run([prec_op, rec_op])\n\n self.assertAlmostEqual(0, prec.eval(), 6)\n self.assertAlmostEqual(0, rec.eval(), 6)\n\n def testWithMultipleUpdates(self):\n num_samples = 1000\n batch_size = 10\n num_batches = int(num_samples / batch_size)\n\n # Create the labels and data.\n labels = np.random.randint(0, 2, size=(num_samples, 1))\n noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))\n predictions = 0.4 + 0.2 * labels + noise\n predictions[predictions > 1] = 1\n predictions[predictions < 0] = 0\n thresholds = [0.3]\n\n tp = 0\n fp = 0\n fn = 0\n tn = 0\n for i in range(num_samples):\n if predictions[i] > thresholds[0]:\n if labels[i] == 1:\n tp += 1\n else:\n fp += 1\n else:\n if labels[i] == 1:\n fn += 1\n else:\n tn += 1\n epsilon = 1e-7\n expected_prec = tp / (epsilon + tp + fp)\n expected_rec = tp / (epsilon + tp + fn)\n\n labels = labels.astype(np.float32)\n predictions = predictions.astype(np.float32)\n\n with self.test_session() as sess:\n # Reshape the data so its easy to 
queue up:\n predictions_batches = predictions.reshape((batch_size, num_batches))\n labels_batches = labels.reshape((batch_size, num_batches))\n\n # Enqueue the data:\n predictions_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n labels_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n\n for i in range(int(num_batches)):\n tf_prediction = constant_op.constant(predictions_batches[:, i])\n tf_label = constant_op.constant(labels_batches[:, i])\n sess.run([\n predictions_queue.enqueue(tf_prediction),\n labels_queue.enqueue(tf_label)\n ])\n\n tf_predictions = predictions_queue.dequeue()\n tf_labels = labels_queue.dequeue()\n\n prec, prec_op = metrics.streaming_precision_at_thresholds(\n tf_predictions, tf_labels, thresholds)\n rec, rec_op = metrics.streaming_recall_at_thresholds(\n tf_predictions, tf_labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(int(num_samples / batch_size)):\n sess.run([prec_op, rec_op])\n # Since this is only approximate, we can't expect a 6 digits match.\n # Although with higher number of samples/thresholds we should see the\n # accuracy improving\n self.assertAlmostEqual(expected_prec, prec.eval(), 2)\n self.assertAlmostEqual(expected_rec, rec.eval(), 2)\n\n\nclass StreamingFPRThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_positive_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0])\n _assert_metric_variables(self, (\n 'false_positive_rate_at_thresholds/false_positives:0',\n 'false_positive_rate_at_thresholds/true_negatives:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [fpr])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n thresholds = [0, 0.5, 1.0]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(fpr_op)\n\n # Then verify idempotency.\n initial_fpr = fpr.eval()\n for _ in range(10):\n self.assertAllClose(initial_fpr, fpr.eval())\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n thresholds = [0.5]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n 
sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertEqual(0, fpr.eval())\n\n def testSomeCorrect(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n thresholds = [0.5]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(0.5, fpr.eval())\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)\n thresholds = [0.5]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(1, fpr.eval())\n\n def testWeights1d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = constant_op.constant(\n [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n fpr_low = fpr[0]\n fpr_high = fpr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)\n self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)\n\n def testWeights2d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = constant_op.constant(\n [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n fpr_low = fpr[0]\n fpr_high = fpr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)\n self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)\n\n def testExtremeThresholds(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))\n thresholds = [-1.0, 2.0] # lower/higher than any values\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n fpr_low = fpr[0]\n fpr_high = fpr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)\n self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)\n\n def testZeroLabelsPredictions(self):\n with self.test_session() as sess:\n predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)\n labels = array_ops.zeros([4])\n thresholds = [0.5]\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fpr_op)\n\n self.assertAlmostEqual(0, fpr.eval(), 6)\n\n def testWithMultipleUpdates(self):\n num_samples = 1000\n 
batch_size = 10\n num_batches = int(num_samples / batch_size)\n\n # Create the labels and data.\n labels = np.random.randint(0, 2, size=(num_samples, 1))\n noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))\n predictions = 0.4 + 0.2 * labels + noise\n predictions[predictions > 1] = 1\n predictions[predictions < 0] = 0\n thresholds = [0.3]\n\n fp = 0\n tn = 0\n for i in range(num_samples):\n if predictions[i] > thresholds[0]:\n if labels[i] == 0:\n fp += 1\n else:\n if labels[i] == 0:\n tn += 1\n epsilon = 1e-7\n expected_fpr = fp / (epsilon + fp + tn)\n\n labels = labels.astype(np.float32)\n predictions = predictions.astype(np.float32)\n\n with self.test_session() as sess:\n # Reshape the data so its easy to queue up:\n predictions_batches = predictions.reshape((batch_size, num_batches))\n labels_batches = labels.reshape((batch_size, num_batches))\n\n # Enqueue the data:\n predictions_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n labels_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n\n for i in range(int(num_batches)):\n tf_prediction = constant_op.constant(predictions_batches[:, i])\n tf_label = constant_op.constant(labels_batches[:, i])\n sess.run([\n predictions_queue.enqueue(tf_prediction),\n labels_queue.enqueue(tf_label)\n ])\n\n tf_predictions = predictions_queue.dequeue()\n tf_labels = labels_queue.dequeue()\n\n fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(\n tf_predictions, tf_labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(int(num_samples / batch_size)):\n sess.run(fpr_op)\n # Since this is only approximate, we can't expect a 6 digits match.\n # Although with higher number of samples/thresholds we should see the\n # accuracy improving\n self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)\n\n\nclass RecallAtPrecisionTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.recall_at_precision(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n precision=0.7)\n _assert_metric_variables(self, ('recall_at_precision/true_positives:0',\n 'recall_at_precision/false_negatives:0',\n 'recall_at_precision/false_positives:0',\n 'recall_at_precision/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.recall_at_precision(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n precision=0.7,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.recall_at_precision(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n precision=0.7,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n recall, update_op = metrics.recall_at_precision(\n labels, predictions, precision=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_recall 
= recall.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_recall, recall.eval(), 5)\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n recall, update_op = metrics.recall_at_precision(\n labels, predictions, precision=1.0)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, sess.run(update_op))\n self.assertEqual(1, recall.eval())\n\n def testSomeCorrectHighPrecision(self):\n predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]\n labels_values = [1, 1, 1, 1, 0, 0, 0, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n recall, update_op = metrics.recall_at_precision(\n labels, predictions, precision=0.8)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.8, sess.run(update_op))\n self.assertAlmostEqual(0.8, recall.eval())\n\n def testSomeCorrectLowPrecision(self):\n predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]\n labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n recall, update_op = metrics.recall_at_precision(\n labels, predictions, precision=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n target_recall = 2.0 / 3.0\n self.assertAlmostEqual(target_recall, sess.run(update_op))\n self.assertAlmostEqual(target_recall, recall.eval())\n\n def testWeighted(self):\n predictions_values = [1, .9, .8, .7, .6]\n labels_values = [1, 1, 0, 0, 1]\n weights_values = [1, 1, 3, 4, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n weights = constant_op.constant(weights_values)\n recall, update_op = metrics.recall_at_precision(\n labels, predictions, weights=weights, precision=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n target_recall = 2.0 / 3.0\n self.assertAlmostEqual(target_recall, sess.run(update_op))\n self.assertAlmostEqual(target_recall, recall.eval())\n\n\nclass PrecisionAtRecallTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.precision_at_recall(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n target_recall=0.7)\n _assert_metric_variables(self,\n ('precision_at_recall/true_positives:0',\n 'precision_at_recall/false_negatives:0',\n 'precision_at_recall/false_positives:0',\n 'precision_at_recall/true_negatives:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.precision_at_recall(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n target_recall=0.7,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.precision_at_recall(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n target_recall=0.7,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n 
predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_precision = precision.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_precision, precision.eval(), places=5)\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.7)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, sess.run(update_op))\n self.assertEqual(1, precision.eval())\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = 1.0 - predictions\n label_prior = math_ops.reduce_mean(labels)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.2)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(sess.run(label_prior), sess.run(update_op))\n self.assertEqual(sess.run(label_prior), precision.eval())\n\n def testSomeCorrectHighRecall(self):\n predictions_values = [0.1, 0.2, 0.5, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.8)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(0.8, sess.run(update_op))\n self.assertAlmostEqual(0.8, precision.eval())\n\n def testSomeCorrectLowRecall(self):\n predictions_values = [0.1, 0.2, 0.7, 0.3, 0.0, 0.1, 0.45, 0.5, 0.6, 0.9]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels_values)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.4)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(2.0/3, sess.run(update_op))\n self.assertAlmostEqual(2.0/3, precision.eval())\n\n def testWeighted_multipleLabelDtypes(self):\n for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):\n predictions_values = [\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.22, 0.25, 0.31, 0.35]\n labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n predictions = constant_op.constant(\n predictions_values, dtype=dtypes_lib.float32)\n labels = math_ops.cast(labels_values, dtype=label_dtype)\n weights = constant_op.constant(weights_values)\n precision, update_op = metrics.precision_at_recall(\n labels, predictions, target_recall=0.8, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(34.0/43, sess.run(update_op))\n 
self.assertAlmostEqual(34.0/43, precision.eval())\n\n\nclass StreamingFNRThresholdsTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_false_negative_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0])\n _assert_metric_variables(self, (\n 'false_negative_rate_at_thresholds/false_negatives:0',\n 'false_negative_rate_at_thresholds/true_positives:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [fnr])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n thresholds=[0, 0.5, 1.0],\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)\n labels = random_ops.random_uniform(\n (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)\n thresholds = [0, 0.5, 1.0]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(fnr_op)\n\n # Then verify idempotency.\n initial_fnr = fnr.eval()\n for _ in range(10):\n self.assertAllClose(initial_fnr, fnr.eval())\n\n def testAllCorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n thresholds = [0.5]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertEqual(0, fnr.eval())\n\n def testSomeCorrect(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))\n thresholds = [0.5]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(0.5, fnr.eval())\n\n def testAllIncorrect(self):\n inputs = np.random.randint(0, 2, size=(100, 1))\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)\n thresholds = [0.5]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(1, fnr.eval())\n\n def testWeights1d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = 
constant_op.constant(\n [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n fnr_low = fnr[0]\n fnr_high = fnr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)\n self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)\n\n def testWeights2d(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)\n labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))\n weights = constant_op.constant(\n [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)\n thresholds = [0.5, 1.1]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds, weights=weights)\n\n fnr_low = fnr[0]\n fnr_high = fnr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)\n self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)\n\n def testExtremeThresholds(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))\n thresholds = [-1.0, 2.0] # lower/higher than any values\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n fnr_low = fnr[0]\n fnr_high = fnr[1]\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(0.0, fnr_low.eval())\n self.assertAlmostEqual(1.0, fnr_high.eval())\n\n def testZeroLabelsPredictions(self):\n with self.test_session() as sess:\n predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)\n labels = array_ops.zeros([4])\n thresholds = [0.5]\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n predictions, labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n sess.run(fnr_op)\n\n self.assertAlmostEqual(0, fnr.eval(), 6)\n\n def testWithMultipleUpdates(self):\n num_samples = 1000\n batch_size = 10\n num_batches = int(num_samples / batch_size)\n\n # Create the labels and data.\n labels = np.random.randint(0, 2, size=(num_samples, 1))\n noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))\n predictions = 0.4 + 0.2 * labels + noise\n predictions[predictions > 1] = 1\n predictions[predictions < 0] = 0\n thresholds = [0.3]\n\n fn = 0\n tp = 0\n for i in range(num_samples):\n if predictions[i] > thresholds[0]:\n if labels[i] == 1:\n tp += 1\n else:\n if labels[i] == 1:\n fn += 1\n epsilon = 1e-7\n expected_fnr = fn / (epsilon + fn + tp)\n\n labels = labels.astype(np.float32)\n predictions = predictions.astype(np.float32)\n\n with self.test_session() as sess:\n # Reshape the data so its easy to queue up:\n predictions_batches = predictions.reshape((batch_size, num_batches))\n labels_batches = labels.reshape((batch_size, num_batches))\n\n # Enqueue the data:\n predictions_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n labels_queue = data_flow_ops.FIFOQueue(\n num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))\n\n for i in range(int(num_batches)):\n tf_prediction = constant_op.constant(predictions_batches[:, i])\n tf_label = constant_op.constant(labels_batches[:, i])\n sess.run([\n 
predictions_queue.enqueue(tf_prediction),\n labels_queue.enqueue(tf_label)\n ])\n\n tf_predictions = predictions_queue.dequeue()\n tf_labels = labels_queue.dequeue()\n\n fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(\n tf_predictions, tf_labels, thresholds)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(int(num_samples / batch_size)):\n sess.run(fnr_op)\n # Since this is only approximate, we can't expect a 6 digits match.\n # Although with higher number of samples/thresholds we should see the\n # accuracy improving\n self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)\n\n\n# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.\n# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.\n# Until then, this test validates that both ops yield the same results.\nclass StreamingRecallAtKTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n self._batch_size = 4\n self._num_classes = 3\n self._np_predictions = np.matrix(('0.1 0.2 0.7;'\n '0.6 0.2 0.2;'\n '0.0 0.9 0.1;'\n '0.2 0.0 0.8'))\n self._np_labels = [0, 0, 0, 0]\n\n def testVars(self):\n metrics.streaming_recall_at_k(\n predictions=array_ops.ones((self._batch_size, self._num_classes)),\n labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),\n k=1)\n _assert_metric_variables(self,\n ('recall_at_1/count:0', 'recall_at_1/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_recall_at_k(\n predictions=array_ops.ones((self._batch_size, self._num_classes)),\n labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),\n k=1,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_recall_at_k(\n predictions=array_ops.ones((self._batch_size, self._num_classes)),\n labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),\n k=1,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testSingleUpdateKIs1(self):\n predictions = constant_op.constant(\n self._np_predictions,\n shape=(self._batch_size, self._num_classes),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)\n recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)\n sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(\n predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0.25, sess.run(update_op))\n self.assertEqual(0.25, recall.eval())\n self.assertEqual(0.25, sess.run(sp_update_op))\n self.assertEqual(0.25, sp_recall.eval())\n\n def testSingleUpdateKIs2(self):\n predictions = constant_op.constant(\n self._np_predictions,\n shape=(self._batch_size, self._num_classes),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)\n recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)\n sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(\n predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0.5, 
sess.run(update_op))\n self.assertEqual(0.5, recall.eval())\n self.assertEqual(0.5, sess.run(sp_update_op))\n self.assertEqual(0.5, sp_recall.eval())\n\n def testSingleUpdateKIs3(self):\n predictions = constant_op.constant(\n self._np_predictions,\n shape=(self._batch_size, self._num_classes),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)\n recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)\n sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(\n predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1.0, sess.run(update_op))\n self.assertEqual(1.0, recall.eval())\n self.assertEqual(1.0, sess.run(sp_update_op))\n self.assertEqual(1.0, sp_recall.eval())\n\n def testSingleUpdateSomeMissingKIs2(self):\n predictions = constant_op.constant(\n self._np_predictions,\n shape=(self._batch_size, self._num_classes),\n dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)\n weights = constant_op.constant(\n [0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)\n recall, update_op = metrics.streaming_recall_at_k(\n predictions, labels, k=2, weights=weights)\n sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(\n predictions,\n array_ops.reshape(labels, (self._batch_size, 1)),\n k=2,\n weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1.0, sess.run(update_op))\n self.assertEqual(1.0, recall.eval())\n self.assertEqual(1.0, sess.run(sp_update_op))\n self.assertEqual(1.0, sp_recall.eval())\n\n\nclass StreamingSparsePrecisionTest(test.TestCase):\n\n def _test_streaming_sparse_precision_at_k(self,\n predictions,\n labels,\n k,\n expected,\n class_id=None,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n metric, update = metrics.streaming_sparse_precision_at_k(\n predictions=constant_op.constant(predictions, dtypes_lib.float32),\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n variables.variables_initializer(variables.local_variables()).run()\n\n # Run per-step op and assert expected values.\n if math.isnan(expected):\n _assert_nan(self, update.eval())\n _assert_nan(self, metric.eval())\n else:\n self.assertEqual(expected, update.eval())\n self.assertEqual(expected, metric.eval())\n\n def _test_streaming_sparse_precision_at_top_k(self,\n top_k_predictions,\n labels,\n expected,\n class_id=None,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n metric, update = metrics.streaming_sparse_precision_at_top_k(\n top_k_predictions=constant_op.constant(top_k_predictions,\n dtypes_lib.int32),\n labels=labels,\n class_id=class_id,\n weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n variables.variables_initializer(variables.local_variables()).run()\n\n # Run per-step op and assert expected values.\n if 
math.isnan(expected):\n self.assertTrue(math.isnan(update.eval()))\n self.assertTrue(math.isnan(metric.eval()))\n else:\n self.assertEqual(expected, update.eval())\n self.assertEqual(expected, metric.eval())\n\n def _test_streaming_sparse_average_precision_at_k(self,\n predictions,\n labels,\n k,\n expected,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n predictions = constant_op.constant(predictions, dtypes_lib.float32)\n metric, update = metrics.streaming_sparse_average_precision_at_k(\n predictions, labels, k, weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n local_variables = variables.local_variables()\n variables.variables_initializer(local_variables).run()\n\n # Run per-step op and assert expected values.\n if math.isnan(expected):\n _assert_nan(self, update.eval())\n _assert_nan(self, metric.eval())\n else:\n self.assertAlmostEqual(expected, update.eval())\n self.assertAlmostEqual(expected, metric.eval())\n\n def _test_streaming_sparse_average_precision_at_top_k(self,\n top_k_predictions,\n labels,\n expected,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n metric, update = metrics.streaming_sparse_average_precision_at_top_k(\n top_k_predictions, labels, weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n local_variables = variables.local_variables()\n variables.variables_initializer(local_variables).run()\n\n # Run per-step op and assert expected values.\n if math.isnan(expected):\n _assert_nan(self, update.eval())\n _assert_nan(self, metric.eval())\n else:\n self.assertAlmostEqual(expected, update.eval())\n self.assertAlmostEqual(expected, metric.eval())\n\n def test_top_k_rank_invalid(self):\n with self.test_session():\n # top_k_predictions has rank < 2.\n top_k_predictions = [9, 4, 6, 2, 0]\n sp_labels = sparse_tensor.SparseTensorValue(\n indices=np.array([[\n 0,\n ], [\n 1,\n ], [\n 2,\n ]], np.int64),\n values=np.array([2, 7, 8], np.int64),\n dense_shape=np.array([\n 10,\n ], np.int64))\n\n with self.assertRaises(ValueError):\n precision, _ = metrics.streaming_sparse_precision_at_top_k(\n top_k_predictions=constant_op.constant(top_k_predictions,\n dtypes_lib.int64),\n labels=sp_labels)\n variables.variables_initializer(variables.local_variables()).run()\n precision.eval()\n\n def test_average_precision(self):\n # Example 1.\n # Matches example here:\n # fastml.com/what-you-wanted-to-know-about-mean-average-precision\n labels_ex1 = (0, 1, 2, 3, 4)\n labels = np.array([labels_ex1], dtype=np.int64)\n predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)\n predictions = (predictions_ex1,)\n predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)\n precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)\n avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,\n (precision_ex1[1] + precision_ex1[3]) / 4)\n for i in xrange(4):\n k = i + 1\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k, expected=precision_ex1[i])\n self._test_streaming_sparse_precision_at_top_k(\n (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])\n self._test_streaming_sparse_average_precision_at_k(\n predictions, labels, k, 
expected=avg_precision_ex1[i])\n self._test_streaming_sparse_average_precision_at_top_k(\n (predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])\n\n # Example 2.\n labels_ex2 = (0, 2, 4, 5, 6)\n labels = np.array([labels_ex2], dtype=np.int64)\n predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)\n predictions = (predictions_ex2,)\n predictions_top_k_ex2 = (1, 3, 0, 6, 5)\n precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)\n avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,\n (precision_ex2[2] + precision_ex2[3]) / 4)\n for i in xrange(4):\n k = i + 1\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k, expected=precision_ex2[i])\n self._test_streaming_sparse_precision_at_top_k(\n (predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])\n self._test_streaming_sparse_average_precision_at_k(\n predictions, labels, k, expected=avg_precision_ex2[i])\n self._test_streaming_sparse_average_precision_at_top_k(\n (predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])\n\n # Both examples, we expect both precision and average precision to be the\n # average of the 2 examples.\n labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)\n predictions = (predictions_ex1, predictions_ex2)\n streaming_precision = [\n (ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)\n ]\n streaming_average_precision = [\n (ex1 + ex2) / 2\n for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)\n ]\n for i in xrange(4):\n k = i + 1\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k, expected=streaming_precision[i])\n predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])\n self._test_streaming_sparse_precision_at_top_k(\n predictions_top_k, labels, expected=streaming_precision[i])\n self._test_streaming_sparse_average_precision_at_k(\n predictions, labels, k, expected=streaming_average_precision[i])\n self._test_streaming_sparse_average_precision_at_top_k(\n predictions_top_k, labels, expected=streaming_average_precision[i])\n\n # Weighted examples, we expect streaming average precision to be the\n # weighted average of the 2 examples.\n weights = (0.3, 0.6)\n streaming_average_precision = [\n (weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])\n for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)\n ]\n for i in xrange(4):\n k = i + 1\n self._test_streaming_sparse_average_precision_at_k(\n predictions,\n labels,\n k,\n expected=streaming_average_precision[i],\n weights=weights)\n self._test_streaming_sparse_average_precision_at_top_k(\n (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),\n labels,\n expected=streaming_average_precision[i],\n weights=weights)\n\n def test_average_precision_some_labels_out_of_range(self):\n \"\"\"Tests that labels outside the [0, n_classes) range are ignored.\"\"\"\n labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)\n labels = np.array([labels_ex1], dtype=np.int64)\n predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)\n predictions = (predictions_ex1,)\n predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)\n precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)\n avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,\n (precision_ex1[1] + precision_ex1[3]) / 4)\n for i in xrange(4):\n k = i + 1\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k, expected=precision_ex1[i])\n self._test_streaming_sparse_precision_at_top_k(\n (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])\n 
self._test_streaming_sparse_average_precision_at_k(\n predictions, labels, k, expected=avg_precision_ex1[i])\n self._test_streaming_sparse_average_precision_at_top_k(\n (predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])\n\n def test_average_precision_at_top_k_static_shape_check(self):\n predictions_top_k = array_ops.placeholder(\n shape=(2, None), dtype=dtypes_lib.int64)\n labels = np.array(((1,), (2,)), dtype=np.int64)\n # Fails due to non-static predictions_idx shape.\n with self.assertRaises(ValueError):\n metric_ops.streaming_sparse_average_precision_at_top_k(\n predictions_top_k, labels)\n\n predictions_top_k = (2, 1)\n # Fails since rank of predictions_idx is less than one.\n with self.assertRaises(ValueError):\n metric_ops.streaming_sparse_average_precision_at_top_k(\n predictions_top_k, labels)\n predictions_top_k = ((2,), (1,))\n # Valid static shape.\n metric_ops.streaming_sparse_average_precision_at_top_k(\n predictions_top_k, labels)\n\n def test_one_label_at_k1_nan(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.\n for class_id in (-1, 0, 1, 2, 4):\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=1, expected=NAN, class_id=class_id)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=NAN, class_id=class_id)\n\n def test_one_label_at_k1(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 3: 1 label, 2 predictions, 1 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=1, expected=1.0 / 2, class_id=3)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=1.0 / 2, class_id=3)\n\n # All classes: 2 labels, 2 predictions, 1 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=1, expected=1.0 / 2)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=1.0 / 2)\n\n def test_three_labels_at_k5_no_predictions(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.\n for class_id in (-1, 1, 3, 8, 10):\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=NAN, class_id=class_id)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=NAN, class_id=class_id)\n\n def test_three_labels_at_k5_no_labels(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 
1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Classes 0,4,6,9: 0 labels, >=1 prediction.\n for class_id in (0, 4, 6, 9):\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=0.0, class_id=class_id)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=0.0, class_id=class_id)\n\n def test_three_labels_at_k5(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 2: 2 labels, 2 correct predictions.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=2.0 / 2, class_id=2)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=2.0 / 2, class_id=2)\n\n # Class 5: 1 label, 1 correct prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=1.0 / 1, class_id=5)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=1.0 / 1, class_id=5)\n\n # Class 7: 1 label, 1 incorrect prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=0.0 / 1, class_id=7)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=0.0 / 1, class_id=7)\n\n # All classes: 10 predictions, 3 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=3.0 / 10)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=3.0 / 10)\n\n def test_three_labels_at_k5_some_out_of_range(self):\n \"\"\"Tests that labels outside the [0, n_classes) range are ignored.\"\"\"\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sp_labels = sparse_tensor.SparseTensorValue(\n indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,\n 3]],\n # values -1 and 10 are outside the [0, n_classes) range and are ignored.\n values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),\n dense_shape=[2, 4])\n\n # Class 2: 2 labels, 2 correct predictions.\n self._test_streaming_sparse_precision_at_k(\n predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)\n\n # Class 5: 1 label, 1 correct prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)\n\n # Class 7: 1 label, 1 incorrect prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)\n\n # All classes: 10 predictions, 3 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, sp_labels, k=5, expected=3.0 / 10)\n self._test_streaming_sparse_precision_at_top_k(\n 
top_k_predictions, sp_labels, expected=3.0 / 10)\n\n def test_3d_nan(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.\n for class_id in (-1, 1, 3, 8, 10):\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=NAN, class_id=class_id)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=NAN, class_id=class_id)\n\n def test_3d_no_labels(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Classes 0,4,6,9: 0 labels, >=1 prediction.\n for class_id in (0, 4, 6, 9):\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=0.0, class_id=class_id)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=0.0, class_id=class_id)\n\n def test_3d(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Class 2: 4 predictions, all correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=4.0 / 4, class_id=2)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=4.0 / 4, class_id=2)\n\n # Class 5: 2 predictions, both correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=2.0 / 2, class_id=5)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=2.0 / 2, class_id=5)\n\n # Class 7: 2 predictions, 1 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=1.0 / 2, class_id=7)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=1.0 / 2, class_id=7)\n\n # All classes: 20 predictions, 7 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=7.0 / 20)\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=7.0 / 20)\n\n def test_3d_ignore_all(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 
0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n for class_id in xrange(10):\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=class_id,\n weights=[[0], [0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=NAN,\n class_id=class_id,\n weights=[[0], [0]])\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=class_id,\n weights=[[0, 0], [0, 0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=NAN,\n class_id=class_id,\n weights=[[0, 0], [0, 0]])\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=NAN, weights=[[0], [0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=NAN, weights=[[0], [0]])\n self._test_streaming_sparse_precision_at_k(\n predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])\n\n def test_3d_ignore_some(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Class 2: 2 predictions, both correct.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[1], [0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[1], [0]])\n\n # Class 2: 2 predictions, both correct.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[0], [1]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[0], [1]])\n\n # Class 7: 1 incorrect prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=0.0 / 1.0,\n class_id=7,\n weights=[[1], [0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=0.0 / 1.0,\n class_id=7,\n weights=[[1], [0]])\n\n # Class 7: 1 correct prediction.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=1.0 / 1.0,\n class_id=7,\n weights=[[0], [1]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=1.0 / 1.0,\n class_id=7,\n weights=[[0], [1]])\n\n # Class 7: no predictions.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=7,\n weights=[[1, 0], [0, 1]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=NAN,\n class_id=7,\n weights=[[1, 0], [0, 
1]])\n\n # Class 7: 2 predictions, 1 correct.\n self._test_streaming_sparse_precision_at_k(\n predictions,\n labels,\n k=5,\n expected=1.0 / 2.0,\n class_id=7,\n weights=[[0, 1], [1, 0]])\n self._test_streaming_sparse_precision_at_top_k(\n top_k_predictions,\n labels,\n expected=1.0 / 2.0,\n class_id=7,\n weights=[[0, 1], [1, 0]])\n\n def test_sparse_tensor_value(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n labels = [[0, 0, 0, 1], [0, 0, 1, 0]]\n expected_precision = 0.5\n with self.test_session():\n _, precision = metrics.streaming_sparse_precision_at_k(\n predictions=constant_op.constant(predictions, dtypes_lib.float32),\n labels=_binary_2d_label_to_sparse_value(labels),\n k=1)\n\n variables.variables_initializer(variables.local_variables()).run()\n\n self.assertEqual(expected_precision, precision.eval())\n\n\nclass StreamingSparseRecallTest(test.TestCase):\n\n def _test_streaming_sparse_recall_at_k(self,\n predictions,\n labels,\n k,\n expected,\n class_id=None,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n metric, update = metrics.streaming_sparse_recall_at_k(\n predictions=constant_op.constant(predictions, dtypes_lib.float32),\n labels=labels,\n k=k,\n class_id=class_id,\n weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n variables.variables_initializer(variables.local_variables()).run()\n\n # Run per-step op and assert expected values.\n if math.isnan(expected):\n _assert_nan(self, update.eval())\n _assert_nan(self, metric.eval())\n else:\n self.assertEqual(expected, update.eval())\n self.assertEqual(expected, metric.eval())\n\n def _test_sparse_recall_at_top_k(self,\n labels,\n top_k_predictions,\n expected,\n class_id=None,\n weights=None):\n with ops.Graph().as_default() as g, self.session(g):\n if weights is not None:\n weights = constant_op.constant(weights, dtypes_lib.float32)\n metric, update = metric_ops.sparse_recall_at_top_k(\n labels=labels,\n top_k_predictions=constant_op.constant(top_k_predictions,\n dtypes_lib.int32),\n class_id=class_id,\n weights=weights)\n\n # Fails without initialized vars.\n self.assertRaises(errors_impl.OpError, metric.eval)\n self.assertRaises(errors_impl.OpError, update.eval)\n variables.variables_initializer(variables.local_variables()).run()\n\n # Run per-step op and assert expected values.\n if math.isnan(expected):\n self.assertTrue(math.isnan(update.eval()))\n self.assertTrue(math.isnan(metric.eval()))\n else:\n self.assertEqual(expected, update.eval())\n self.assertEqual(expected, metric.eval())\n\n def test_one_label_at_k1_nan(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n\n # Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of\n # range.\n for labels in (sparse_labels, dense_labels):\n for class_id in (-1, 0, 1, 4):\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=NAN, class_id=class_id)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, class_id=class_id)\n\n def test_one_label_at_k1_no_predictions(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n sparse_labels = 
_binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 2: 0 predictions.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=0.0, class_id=2)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.0, class_id=2)\n\n def test_one_label_at_k1(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 3: 1 label, 2 predictions, 1 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 1, class_id=3)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 1, class_id=3)\n\n # All classes: 2 labels, 2 predictions, 1 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 2)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 2)\n\n def _test_one_label_at_k1_weighted(self, labels):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n top_k_predictions = [[3], [3]]\n\n # Class 3: 1 label, 2 predictions, 1 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0,))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0,))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=1.0 / 1,\n class_id=3,\n weights=(2.0,))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 1,\n class_id=3,\n weights=(2.0,))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=NAN,\n class_id=3,\n weights=(0.0, 0.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=NAN,\n class_id=3,\n weights=(0.0, 0.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=NAN,\n class_id=3,\n weights=(0.0, 1.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=NAN,\n class_id=3,\n weights=(0.0, 1.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0, 0.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0, 0.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0, 1.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 1,\n class_id=3,\n weights=(1.0, 1.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=2.0 / 2,\n class_id=3,\n weights=(2.0, 3.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=2.0 / 2,\n class_id=3,\n weights=(2.0, 3.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=3.0 / 3,\n class_id=3,\n weights=(3.0, 2.0))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n 
expected=3.0 / 3,\n class_id=3,\n weights=(3.0, 2.0))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=0.3 / 0.3,\n class_id=3,\n weights=(0.3, 0.6))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=0.3 / 0.3,\n class_id=3,\n weights=(0.3, 0.6))\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=1,\n expected=0.6 / 0.6,\n class_id=3,\n weights=(0.6, 0.3))\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=0.6 / 0.6,\n class_id=3,\n weights=(0.6, 0.3))\n\n # All classes: 2 labels, 2 predictions, 1 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=NAN, weights=(0.0,))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, weights=(0.0,))\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))\n\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))\n\n def test_one_label_at_k1_weighted_sparse_labels(self):\n sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n self._test_one_label_at_k1_weighted(sparse_labels)\n\n def test_one_label_at_k1_weighted_dense_labels(self):\n dense_labels = np.array([[3], [2]], dtype=np.int64)\n self._test_one_label_at_k1_weighted(dense_labels)\n\n def test_three_labels_at_k5_nan(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in 
(sparse_labels, dense_labels):\n # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.\n for class_id in (0, 3, 4, 6, 9, 10):\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=NAN, class_id=class_id)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, class_id=class_id)\n\n def test_three_labels_at_k5_no_predictions(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 8: 1 label, no predictions.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=0.0 / 1, class_id=8)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.0 / 1, class_id=8)\n\n def test_three_labels_at_k5(self):\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sparse_labels = _binary_2d_label_to_sparse_value(\n [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])\n dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Class 2: 2 labels, both correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=2.0 / 2, class_id=2)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=2.0 / 2, class_id=2)\n\n # Class 5: 1 label, incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=1.0 / 1, class_id=5)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 1, class_id=5)\n\n # Class 7: 1 label, incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=0.0 / 1, class_id=7)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.0 / 1, class_id=7)\n\n # All classes: 6 labels, 3 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=3.0 / 6)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=3.0 / 6)\n\n def test_three_labels_at_k5_some_out_of_range(self):\n \"\"\"Tests that labels outside the [0, n_classes) count in denominator.\"\"\"\n predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]\n top_k_predictions = [\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ]\n sp_labels = sparse_tensor.SparseTensorValue(\n indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,\n 3]],\n # values -1 and 10 are outside the [0, n_classes) range.\n values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),\n dense_shape=[2, 4])\n\n # Class 2: 2 labels, both correct.\n self._test_streaming_sparse_recall_at_k(\n predictions=predictions,\n labels=sp_labels,\n k=5,\n expected=2.0 / 2,\n class_id=2)\n self._test_sparse_recall_at_top_k(\n sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)\n\n # Class 5: 1 label, incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions=predictions,\n labels=sp_labels,\n k=5,\n expected=1.0 / 1,\n class_id=5)\n self._test_sparse_recall_at_top_k(\n sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)\n\n # 
Class 7: 1 label, incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions=predictions,\n labels=sp_labels,\n k=5,\n expected=0.0 / 1,\n class_id=7)\n self._test_sparse_recall_at_top_k(\n sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)\n\n # All classes: 8 labels, 3 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)\n self._test_sparse_recall_at_top_k(\n sp_labels, top_k_predictions, expected=3.0 / 8)\n\n def test_3d_nan(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n sparse_labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])\n dense_labels = np.array(\n [[[2, 7, 8], [1, 2, 5]], [\n [1, 2, 5],\n [2, 7, 8],\n ]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.\n for class_id in (0, 3, 4, 6, 9, 10):\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=NAN, class_id=class_id)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, class_id=class_id)\n\n def test_3d_no_predictions(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n sparse_labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])\n dense_labels = np.array(\n [[[2, 7, 8], [1, 2, 5]], [\n [1, 2, 5],\n [2, 7, 8],\n ]], dtype=np.int64)\n\n for labels in (sparse_labels, dense_labels):\n # Classes 1,8 have 0 predictions, >=1 label.\n for class_id in (1, 8):\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=0.0, class_id=class_id)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=0.0, class_id=class_id)\n\n def test_3d(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Class 2: 4 labels, all correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=4.0 / 4, class_id=2)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=4.0 / 4, class_id=2)\n\n # Class 5: 2 labels, both correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=2.0 / 2, class_id=5)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=2.0 / 2, class_id=5)\n\n 
# Class 7: 2 labels, 1 incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=1.0 / 2, class_id=7)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=1.0 / 2, class_id=7)\n\n # All classes: 12 labels, 7 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=7.0 / 12)\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=7.0 / 12)\n\n def test_3d_ignore_all(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n for class_id in xrange(10):\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=class_id,\n weights=[[0], [0]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=NAN,\n class_id=class_id,\n weights=[[0], [0]])\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=class_id,\n weights=[[0, 0], [0, 0]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=NAN,\n class_id=class_id,\n weights=[[0, 0], [0, 0]])\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=NAN, weights=[[0], [0]])\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, weights=[[0], [0]])\n self._test_streaming_sparse_recall_at_k(\n predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])\n self._test_sparse_recall_at_top_k(\n labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])\n\n def test_3d_ignore_some(self):\n predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],\n [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],\n [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],\n [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]\n top_k_predictions = [[\n [9, 4, 6, 2, 0],\n [5, 7, 2, 9, 6],\n ], [\n [5, 7, 2, 9, 6],\n [9, 4, 6, 2, 0],\n ]]\n labels = _binary_3d_label_to_sparse_value(\n [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],\n [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])\n\n # Class 2: 2 labels, both correct.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[1], [0]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[1], [0]])\n\n # Class 2: 2 labels, both correct.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[0], [1]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=2.0 / 2.0,\n class_id=2,\n weights=[[0], [1]])\n\n # Class 7: 1 label, correct.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=1.0 / 1.0,\n class_id=7,\n weights=[[0], [1]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 1.0,\n class_id=7,\n weights=[[0], [1]])\n\n # Class 7: 1 label, incorrect.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n 
labels,\n k=5,\n expected=0.0 / 1.0,\n class_id=7,\n weights=[[1], [0]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=0.0 / 1.0,\n class_id=7,\n weights=[[1], [0]])\n\n # Class 7: 2 labels, 1 correct.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=1.0 / 2.0,\n class_id=7,\n weights=[[1, 0], [1, 0]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=1.0 / 2.0,\n class_id=7,\n weights=[[1, 0], [1, 0]])\n\n # Class 7: No labels.\n self._test_streaming_sparse_recall_at_k(\n predictions,\n labels,\n k=5,\n expected=NAN,\n class_id=7,\n weights=[[0, 1], [0, 1]])\n self._test_sparse_recall_at_top_k(\n labels,\n top_k_predictions,\n expected=NAN,\n class_id=7,\n weights=[[0, 1], [0, 1]])\n\n def test_sparse_tensor_value(self):\n predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]\n labels = [[0, 0, 1, 0], [0, 0, 0, 1]]\n expected_recall = 0.5\n with self.test_session():\n _, recall = metrics.streaming_sparse_recall_at_k(\n predictions=constant_op.constant(predictions, dtypes_lib.float32),\n labels=_binary_2d_label_to_sparse_value(labels),\n k=1)\n\n variables.variables_initializer(variables.local_variables()).run()\n\n self.assertEqual(expected_recall, recall.eval())\n\n\nclass StreamingMeanAbsoluteErrorTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_absolute_error(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean_absolute_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_mean_absolute_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_normal((10, 3), seed=1)\n labels = random_ops.random_normal((10, 3), seed=2)\n error, update_op = metrics.streaming_mean_absolute_error(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_error = error.eval()\n for _ in range(10):\n self.assertEqual(initial_error, error.eval())\n\n def testSingleUpdateWithErrorAndWeights(self):\n predictions = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)\n weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))\n\n error, update_op = metrics.streaming_mean_absolute_error(\n predictions, labels, weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(3, sess.run(update_op))\n self.assertEqual(3, error.eval())\n\n\nclass StreamingMeanRelativeErrorTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_relative_error(\n 
predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n normalizer=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean_relative_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n normalizer=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_mean_relative_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n normalizer=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_normal((10, 3), seed=1)\n labels = random_ops.random_normal((10, 3), seed=2)\n normalizer = random_ops.random_normal((10, 3), seed=3)\n error, update_op = metrics.streaming_mean_relative_error(\n predictions, labels, normalizer)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_error = error.eval()\n for _ in range(10):\n self.assertEqual(initial_error, error.eval())\n\n def testSingleUpdateNormalizedByLabels(self):\n np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)\n np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)\n expected_error = np.mean(\n np.divide(np.absolute(np_predictions - np_labels), np_labels))\n\n predictions = constant_op.constant(\n np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(np_labels, shape=(1, 4))\n\n error, update_op = metrics.streaming_mean_relative_error(\n predictions, labels, normalizer=labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(expected_error, sess.run(update_op))\n self.assertEqual(expected_error, error.eval())\n\n def testSingleUpdateNormalizedByZeros(self):\n np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)\n\n predictions = constant_op.constant(\n np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_relative_error(\n predictions, labels, normalizer=array_ops.zeros_like(labels))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0.0, sess.run(update_op))\n self.assertEqual(0.0, error.eval())\n\n\nclass StreamingMeanSquaredErrorTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_squared_error(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean_squared_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = 
'__updates__'\n _, update_op = metrics.streaming_mean_squared_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_normal((10, 3), seed=1)\n labels = random_ops.random_normal((10, 3), seed=2)\n error, update_op = metrics.streaming_mean_squared_error(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_error = error.eval()\n for _ in range(10):\n self.assertEqual(initial_error, error.eval())\n\n def testSingleUpdateZeroError(self):\n predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)\n labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_squared_error(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, sess.run(update_op))\n self.assertEqual(0, error.eval())\n\n def testSingleUpdateWithError(self):\n predictions = constant_op.constant(\n [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_squared_error(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(6, sess.run(update_op))\n self.assertEqual(6, error.eval())\n\n def testSingleUpdateWithErrorAndWeights(self):\n predictions = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)\n weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))\n\n error, update_op = metrics.streaming_mean_squared_error(\n predictions, labels, weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(13, sess.run(update_op))\n self.assertEqual(13, error.eval())\n\n def testMultipleBatchesOfSizeOne(self):\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, preds_queue, [10, 8, 6])\n _enqueue_vector(sess, preds_queue, [-4, 3, -1])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, labels_queue, [1, 3, 2])\n _enqueue_vector(sess, labels_queue, [2, 4, 6])\n labels = labels_queue.dequeue()\n\n error, update_op = metrics.streaming_mean_squared_error(\n predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n sess.run(update_op)\n self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)\n\n self.assertAlmostEqual(208.0 / 6, error.eval(), 5)\n\n def testMetricsComputedConcurrently(self):\n with self.test_session() as sess:\n # Create the queue that populates one set of predictions.\n preds_queue0 = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, preds_queue0, [10, 8, 6])\n _enqueue_vector(sess, preds_queue0, [-4, 3, -1])\n predictions0 = preds_queue0.dequeue()\n\n # Create the queue that populates one 
set of predictions.\n preds_queue1 = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, preds_queue1, [0, 1, 1])\n _enqueue_vector(sess, preds_queue1, [1, 1, 0])\n predictions1 = preds_queue1.dequeue()\n\n # Create the queue that populates one set of labels.\n labels_queue0 = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, labels_queue0, [1, 3, 2])\n _enqueue_vector(sess, labels_queue0, [2, 4, 6])\n labels0 = labels_queue0.dequeue()\n\n # Create the queue that populates another set of labels.\n labels_queue1 = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, labels_queue1, [-5, -3, -1])\n _enqueue_vector(sess, labels_queue1, [5, 4, 3])\n labels1 = labels_queue1.dequeue()\n\n mse0, update_op0 = metrics.streaming_mean_squared_error(\n predictions0, labels0, name='msd0')\n mse1, update_op1 = metrics.streaming_mean_squared_error(\n predictions1, labels1, name='msd1')\n\n sess.run(variables.local_variables_initializer())\n sess.run([update_op0, update_op1])\n sess.run([update_op0, update_op1])\n\n mse0, mse1 = sess.run([mse0, mse1])\n self.assertAlmostEqual(208.0 / 6, mse0, 5)\n self.assertAlmostEqual(79.0 / 6, mse1, 5)\n\n def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, preds_queue, [10, 8, 6])\n _enqueue_vector(sess, preds_queue, [-4, 3, -1])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 2, dtypes=dtypes_lib.float32, shapes=(1, 3))\n _enqueue_vector(sess, labels_queue, [1, 3, 2])\n _enqueue_vector(sess, labels_queue, [2, 4, 6])\n labels = labels_queue.dequeue()\n\n mae, ma_update_op = metrics.streaming_mean_absolute_error(\n predictions, labels)\n mse, ms_update_op = metrics.streaming_mean_squared_error(\n predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n sess.run([ma_update_op, ms_update_op])\n sess.run([ma_update_op, ms_update_op])\n\n self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)\n self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)\n\n\nclass StreamingRootMeanSquaredErrorTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_root_mean_squared_error(\n predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))\n _assert_metric_variables(\n self,\n ('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_root_mean_squared_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_root_mean_squared_error(\n predictions=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_normal((10, 3), seed=1)\n labels = random_ops.random_normal((10, 3), seed=2)\n error, update_op = metrics.streaming_root_mean_squared_error(\n 
predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_error = error.eval()\n for _ in range(10):\n self.assertEqual(initial_error, error.eval())\n\n def testSingleUpdateZeroError(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n 0.0, shape=(1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)\n\n rmse, update_op = metrics.streaming_root_mean_squared_error(\n predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, sess.run(update_op))\n\n self.assertEqual(0, rmse.eval())\n\n def testSingleUpdateWithError(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)\n\n rmse, update_op = metrics.streaming_root_mean_squared_error(\n predictions, labels)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)\n self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)\n\n def testSingleUpdateWithErrorAndWeights(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)\n weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))\n\n rmse, update_op = metrics.streaming_root_mean_squared_error(\n predictions, labels, weights)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))\n\n self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)\n\n\nclass StreamingCovarianceTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_covariance(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))\n _assert_metric_variables(self, (\n 'covariance/comoment:0',\n 'covariance/count:0',\n 'covariance/mean_label:0',\n 'covariance/mean_prediction:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n cov, _ = metrics.streaming_covariance(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [cov])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_covariance(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n labels = random_ops.random_normal((10, 3), seed=2)\n predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5\n cov, update_op = metrics.streaming_covariance(predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then 
verify idempotency.\n initial_cov = cov.eval()\n for _ in range(10):\n self.assertEqual(initial_cov, cov.eval())\n\n def testSingleUpdateIdentical(self):\n with self.test_session() as sess:\n predictions = math_ops.to_float(math_ops.range(10))\n labels = math_ops.to_float(math_ops.range(10))\n\n cov, update_op = metrics.streaming_covariance(predictions, labels)\n\n expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)\n self.assertAlmostEqual(expected_cov, cov.eval(), 5)\n\n def testSingleUpdateNonIdentical(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)\n\n cov, update_op = metrics.streaming_covariance(predictions, labels)\n\n expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_cov, update_op.eval())\n self.assertAlmostEqual(expected_cov, cov.eval())\n\n def testSingleUpdateWithErrorAndWeights(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)\n weights = constant_op.constant(\n [0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)\n\n cov, update_op = metrics.streaming_covariance(\n predictions, labels, weights=weights)\n\n expected_cov = np.cov(\n [2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_cov, sess.run(update_op))\n self.assertAlmostEqual(expected_cov, cov.eval())\n\n def testMultiUpdateWithErrorNoWeights(self):\n with self.test_session() as sess:\n np.random.seed(123)\n n = 100\n predictions = np.random.randn(n)\n labels = 0.5 * predictions + np.random.randn(n)\n\n stride = 10\n predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n\n cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)\n\n sess.run(variables.local_variables_initializer())\n prev_expected_cov = NAN\n for i in range(n // stride):\n feed_dict = {\n predictions_t: predictions[stride * i:stride * (i + 1)],\n labels_t: labels[stride * i:stride * (i + 1)]\n }\n self.assertEqual(\n np.isnan(prev_expected_cov),\n np.isnan(sess.run(cov, feed_dict=feed_dict)))\n if not np.isnan(prev_expected_cov):\n self.assertAlmostEqual(prev_expected_cov,\n sess.run(cov, feed_dict=feed_dict), 5)\n expected_cov = np.cov(predictions[:stride * (i + 1)],\n labels[:stride * (i + 1)])[0, 1]\n self.assertAlmostEqual(expected_cov,\n sess.run(update_op, feed_dict=feed_dict), 5)\n self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),\n 5)\n prev_expected_cov = expected_cov\n\n def testMultiUpdateWithErrorAndWeights(self):\n with self.test_session() as sess:\n np.random.seed(123)\n n = 100\n predictions = np.random.randn(n)\n labels = 0.5 * predictions + np.random.randn(n)\n weights = np.tile(np.arange(n // 10), n // 10)\n np.random.shuffle(weights)\n\n stride = 10\n predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n\n cov, update_op = 
metrics.streaming_covariance(\n predictions_t, labels_t, weights=weights_t)\n\n sess.run(variables.local_variables_initializer())\n prev_expected_cov = NAN\n for i in range(n // stride):\n feed_dict = {\n predictions_t: predictions[stride * i:stride * (i + 1)],\n labels_t: labels[stride * i:stride * (i + 1)],\n weights_t: weights[stride * i:stride * (i + 1)]\n }\n self.assertEqual(\n np.isnan(prev_expected_cov),\n np.isnan(sess.run(cov, feed_dict=feed_dict)))\n if not np.isnan(prev_expected_cov):\n self.assertAlmostEqual(prev_expected_cov,\n sess.run(cov, feed_dict=feed_dict), 5)\n expected_cov = np.cov(\n predictions[:stride * (i + 1)],\n labels[:stride * (i + 1)],\n fweights=weights[:stride * (i + 1)])[0, 1]\n self.assertAlmostEqual(expected_cov,\n sess.run(update_op, feed_dict=feed_dict), 5)\n self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),\n 5)\n prev_expected_cov = expected_cov\n\n\nclass StreamingPearsonRTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_pearson_correlation(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))\n _assert_metric_variables(self, (\n 'pearson_r/covariance/comoment:0',\n 'pearson_r/covariance/count:0',\n 'pearson_r/covariance/mean_label:0',\n 'pearson_r/covariance/mean_prediction:0',\n 'pearson_r/variance_labels/count:0',\n 'pearson_r/variance_labels/comoment:0',\n 'pearson_r/variance_labels/mean_label:0',\n 'pearson_r/variance_labels/mean_prediction:0',\n 'pearson_r/variance_predictions/comoment:0',\n 'pearson_r/variance_predictions/count:0',\n 'pearson_r/variance_predictions/mean_label:0',\n 'pearson_r/variance_predictions/mean_prediction:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n pearson_r, _ = metrics.streaming_pearson_correlation(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_pearson_correlation(\n predictions=math_ops.to_float(math_ops.range(10)) +\n array_ops.ones([10, 10]),\n labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n labels = random_ops.random_normal((10, 3), seed=2)\n predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions, labels)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_r = pearson_r.eval()\n for _ in range(10):\n self.assertEqual(initial_r, pearson_r.eval())\n\n def testSingleUpdateIdentical(self):\n with self.test_session() as sess:\n predictions = math_ops.to_float(math_ops.range(10))\n labels = math_ops.to_float(math_ops.range(10))\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions, labels)\n\n expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]\n 
sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_r, sess.run(update_op), 5)\n self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)\n\n def testSingleUpdateNonIdentical(self):\n with self.test_session() as sess:\n predictions = constant_op.constant(\n [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions, labels)\n\n expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_r, update_op.eval())\n self.assertAlmostEqual(expected_r, pearson_r.eval())\n\n def testSingleUpdateWithErrorAndWeights(self):\n with self.test_session() as sess:\n predictions = np.array([2, 4, 6, 8])\n labels = np.array([1, 3, 2, 7])\n weights = np.array([0, 1, 3, 1])\n predictions_t = constant_op.constant(\n predictions, shape=(1, 4), dtype=dtypes_lib.float32)\n labels_t = constant_op.constant(\n labels, shape=(1, 4), dtype=dtypes_lib.float32)\n weights_t = constant_op.constant(\n weights, shape=(1, 4), dtype=dtypes_lib.float32)\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions_t, labels_t, weights=weights_t)\n\n cmat = np.cov(predictions, labels, fweights=weights)\n expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expected_r, sess.run(update_op))\n self.assertAlmostEqual(expected_r, pearson_r.eval())\n\n def testMultiUpdateWithErrorNoWeights(self):\n with self.test_session() as sess:\n np.random.seed(123)\n n = 100\n predictions = np.random.randn(n)\n labels = 0.5 * predictions + np.random.randn(n)\n\n stride = 10\n predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions_t, labels_t)\n\n sess.run(variables.local_variables_initializer())\n prev_expected_r = NAN\n for i in range(n // stride):\n feed_dict = {\n predictions_t: predictions[stride * i:stride * (i + 1)],\n labels_t: labels[stride * i:stride * (i + 1)]\n }\n self.assertEqual(\n np.isnan(prev_expected_r),\n np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))\n if not np.isnan(prev_expected_r):\n self.assertAlmostEqual(prev_expected_r,\n sess.run(pearson_r, feed_dict=feed_dict), 5)\n expected_r = np.corrcoef(predictions[:stride * (i + 1)],\n labels[:stride * (i + 1)])[0, 1]\n self.assertAlmostEqual(expected_r,\n sess.run(update_op, feed_dict=feed_dict), 5)\n self.assertAlmostEqual(expected_r,\n sess.run(pearson_r, feed_dict=feed_dict), 5)\n prev_expected_r = expected_r\n\n def testMultiUpdateWithErrorAndWeights(self):\n with self.test_session() as sess:\n np.random.seed(123)\n n = 100\n predictions = np.random.randn(n)\n labels = 0.5 * predictions + np.random.randn(n)\n weights = np.tile(np.arange(n // 10), n // 10)\n np.random.shuffle(weights)\n\n stride = 10\n predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions_t, labels_t, weights=weights_t)\n\n sess.run(variables.local_variables_initializer())\n prev_expected_r = NAN\n for i in range(n // stride):\n feed_dict = {\n predictions_t: 
predictions[stride * i:stride * (i + 1)],\n labels_t: labels[stride * i:stride * (i + 1)],\n weights_t: weights[stride * i:stride * (i + 1)]\n }\n self.assertEqual(\n np.isnan(prev_expected_r),\n np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))\n if not np.isnan(prev_expected_r):\n self.assertAlmostEqual(prev_expected_r,\n sess.run(pearson_r, feed_dict=feed_dict), 5)\n cmat = np.cov(\n predictions[:stride * (i + 1)],\n labels[:stride * (i + 1)],\n fweights=weights[:stride * (i + 1)])\n expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])\n self.assertAlmostEqual(expected_r,\n sess.run(update_op, feed_dict=feed_dict), 5)\n self.assertAlmostEqual(expected_r,\n sess.run(pearson_r, feed_dict=feed_dict), 5)\n prev_expected_r = expected_r\n\n def testMultiUpdateWithErrorAndSingletonBatches(self):\n with self.test_session() as sess:\n np.random.seed(123)\n n = 100\n predictions = np.random.randn(n)\n labels = 0.5 * predictions + np.random.randn(n)\n stride = 10\n weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)\n for row in weights:\n np.random.shuffle(row)\n # Now, weights is one-hot by row - one item per batch has non-zero weight.\n weights = weights.reshape((n,))\n\n predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])\n\n pearson_r, update_op = metrics.streaming_pearson_correlation(\n predictions_t, labels_t, weights=weights_t)\n\n sess.run(variables.local_variables_initializer())\n for i in range(n // stride):\n feed_dict = {\n predictions_t: predictions[stride * i:stride * (i + 1)],\n labels_t: labels[stride * i:stride * (i + 1)],\n weights_t: weights[stride * i:stride * (i + 1)]\n }\n cmat = np.cov(\n predictions[:stride * (i + 1)],\n labels[:stride * (i + 1)],\n fweights=weights[:stride * (i + 1)])\n expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])\n actual_r = sess.run(update_op, feed_dict=feed_dict)\n self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))\n self.assertEqual(\n np.isnan(expected_r),\n np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))\n if not np.isnan(expected_r):\n self.assertAlmostEqual(expected_r, actual_r, 5)\n self.assertAlmostEqual(expected_r,\n sess.run(pearson_r, feed_dict=feed_dict), 5)\n\n\nclass StreamingMeanCosineDistanceTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_cosine_distance(\n predictions=array_ops.ones((10, 3)),\n labels=array_ops.ones((10, 3)),\n dim=1)\n _assert_metric_variables(self, (\n 'mean_cosine_distance/count:0',\n 'mean_cosine_distance/total:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_mean_cosine_distance(\n predictions=array_ops.ones((10, 3)),\n labels=array_ops.ones((10, 3)),\n dim=1,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_mean_cosine_distance(\n predictions=array_ops.ones((10, 3)),\n labels=array_ops.ones((10, 3)),\n dim=1,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_normal((10, 3), seed=1)\n labels = random_ops.random_normal((10, 3), seed=2)\n error, update_op = 
metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=1)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_error = error.eval()\n for _ in range(10):\n self.assertEqual(initial_error, error.eval())\n\n def testSingleUpdateZeroError(self):\n np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))\n\n predictions = constant_op.constant(\n np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=2)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, sess.run(update_op))\n self.assertEqual(0, error.eval())\n\n def testSingleUpdateWithError1(self):\n np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))\n np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))\n\n predictions = constant_op.constant(\n np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=2)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1, sess.run(update_op), 5)\n self.assertAlmostEqual(1, error.eval(), 5)\n\n def testSingleUpdateWithError2(self):\n np_predictions = np.matrix(\n ('0.819031913261206 0.567041924552012 0.087465312324590;'\n '-0.665139432070255 -0.739487441769973 -0.103671883216994;'\n '0.707106781186548 -0.707106781186548 0'))\n np_labels = np.matrix(\n ('0.819031913261206 0.567041924552012 0.087465312324590;'\n '0.665139432070255 0.739487441769973 0.103671883216994;'\n '0.707106781186548 0.707106781186548 0'))\n\n predictions = constant_op.constant(\n np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n error, update_op = metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=2)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(1.0, sess.run(update_op), 5)\n self.assertAlmostEqual(1.0, error.eval(), 5)\n\n def testSingleUpdateWithErrorAndWeights1(self):\n np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))\n np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))\n\n predictions = constant_op.constant(\n np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n weights = constant_op.constant(\n [1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=2, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(0, sess.run(update_op))\n self.assertEqual(0, error.eval())\n\n def testSingleUpdateWithErrorAndWeights2(self):\n np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))\n np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))\n\n predictions = constant_op.constant(\n np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n labels = constant_op.constant(\n np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)\n weights = constant_op.constant(\n [0, 1, 1], 
shape=(3, 1, 1), dtype=dtypes_lib.float32)\n\n error, update_op = metrics.streaming_mean_cosine_distance(\n predictions, labels, dim=2, weights=weights)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1.5, update_op.eval())\n self.assertEqual(1.5, error.eval())\n\n\nclass PcntBelowThreshTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)\n _assert_metric_variables(self, (\n 'percentage_below_threshold/count:0',\n 'percentage_below_threshold/total:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.streaming_percentage_less(\n values=array_ops.ones((10,)),\n threshold=2,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_percentage_less(\n values=array_ops.ones((10,)),\n threshold=2,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testOneUpdate(self):\n with self.test_session() as sess:\n values = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n\n pcnt0, update_op0 = metrics.streaming_percentage_less(\n values, 100, name='high')\n pcnt1, update_op1 = metrics.streaming_percentage_less(\n values, 7, name='medium')\n pcnt2, update_op2 = metrics.streaming_percentage_less(\n values, 1, name='low')\n\n sess.run(variables.local_variables_initializer())\n sess.run([update_op0, update_op1, update_op2])\n\n pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])\n self.assertAlmostEqual(1.0, pcnt0, 5)\n self.assertAlmostEqual(0.75, pcnt1, 5)\n self.assertAlmostEqual(0.0, pcnt2, 5)\n\n def testSomePresentOneUpdate(self):\n with self.test_session() as sess:\n values = constant_op.constant(\n [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)\n weights = constant_op.constant(\n [1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)\n\n pcnt0, update_op0 = metrics.streaming_percentage_less(\n values, 100, weights=weights, name='high')\n pcnt1, update_op1 = metrics.streaming_percentage_less(\n values, 7, weights=weights, name='medium')\n pcnt2, update_op2 = metrics.streaming_percentage_less(\n values, 1, weights=weights, name='low')\n\n sess.run(variables.local_variables_initializer())\n self.assertListEqual([1.0, 0.5, 0.0],\n sess.run([update_op0, update_op1, update_op2]))\n\n pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])\n self.assertAlmostEqual(1.0, pcnt0, 5)\n self.assertAlmostEqual(0.5, pcnt1, 5)\n self.assertAlmostEqual(0.0, pcnt2, 5)\n\n\nclass StreamingMeanIOUTest(test.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_mean_iou(\n predictions=array_ops.ones([10, 1]),\n labels=array_ops.ones([10, 1]),\n num_classes=2)\n _assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))\n\n def testMetricsCollections(self):\n my_collection_name = '__metrics__'\n mean_iou, _ = metrics.streaming_mean_iou(\n predictions=array_ops.ones([10, 1]),\n labels=array_ops.ones([10, 1]),\n num_classes=2,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = 
metrics.streaming_mean_iou(\n predictions=array_ops.ones([10, 1]),\n labels=array_ops.ones([10, 1]),\n num_classes=2,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):\n predictions = array_ops.ones([10, 3])\n labels = array_ops.ones([10, 4])\n with self.assertRaises(ValueError):\n metrics.streaming_mean_iou(predictions, labels, num_classes=2)\n\n def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):\n predictions = array_ops.ones([10])\n labels = array_ops.ones([10])\n weights = array_ops.zeros([9])\n with self.assertRaises(ValueError):\n metrics.streaming_mean_iou(\n predictions, labels, num_classes=2, weights=weights)\n\n def testValueTensorIsIdempotent(self):\n num_classes = 3\n predictions = random_ops.random_uniform(\n [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)\n miou, update_op = metrics.streaming_mean_iou(\n predictions, labels, num_classes=num_classes)\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_miou = miou.eval()\n for _ in range(10):\n self.assertEqual(initial_miou, miou.eval())\n\n def testMultipleUpdates(self):\n num_classes = 3\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 5, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [2])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [0])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 5, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [2])\n _enqueue_vector(sess, labels_queue, [1])\n labels = labels_queue.dequeue()\n\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(5):\n sess.run(update_op)\n desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])\n self.assertEqual(desired_output, miou.eval())\n\n def testMultipleUpdatesWithWeights(self):\n num_classes = 2\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n preds_queue = data_flow_ops.FIFOQueue(\n 6, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n labels_queue = data_flow_ops.FIFOQueue(\n 6, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n labels = labels_queue.dequeue()\n\n # Create the queue 
that populates the weights.\n weights_queue = data_flow_ops.FIFOQueue(\n 6, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, weights_queue, [1.0])\n _enqueue_vector(sess, weights_queue, [1.0])\n _enqueue_vector(sess, weights_queue, [1.0])\n _enqueue_vector(sess, weights_queue, [0.0])\n _enqueue_vector(sess, weights_queue, [1.0])\n _enqueue_vector(sess, weights_queue, [0.0])\n weights = weights_queue.dequeue()\n\n miou, update_op = metrics.streaming_mean_iou(\n predictions, labels, num_classes, weights=weights)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(6):\n sess.run(update_op)\n desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])\n self.assertAlmostEqual(desired_output, miou.eval())\n\n def testMultipleUpdatesWithMissingClass(self):\n # Test the case where there are no predicions and labels for\n # one class, and thus there is one row and one column with\n # zero entries in the confusion matrix.\n num_classes = 3\n with self.test_session() as sess:\n # Create the queue that populates the predictions.\n # There is no prediction for class 2.\n preds_queue = data_flow_ops.FIFOQueue(\n 5, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, preds_queue, [0])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [1])\n _enqueue_vector(sess, preds_queue, [0])\n predictions = preds_queue.dequeue()\n\n # Create the queue that populates the labels.\n # There is label for class 2.\n labels_queue = data_flow_ops.FIFOQueue(\n 5, dtypes=dtypes_lib.int32, shapes=(1, 1))\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [1])\n _enqueue_vector(sess, labels_queue, [0])\n _enqueue_vector(sess, labels_queue, [1])\n labels = labels_queue.dequeue()\n\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(5):\n sess.run(update_op)\n desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])\n self.assertAlmostEqual(desired_output, miou.eval())\n\n def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):\n predictions = array_ops.concat([\n constant_op.constant(0, shape=[5]),\n constant_op.constant(1, shape=[5])\n ], 0)\n labels = array_ops.concat([\n constant_op.constant(0, shape=[3]),\n constant_op.constant(1, shape=[7])\n ], 0)\n num_classes = 2\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n confusion_matrix = update_op.eval()\n self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)\n desired_miou = np.mean([3. / 5., 5. 
/ 7.])\n self.assertAlmostEqual(desired_miou, miou.eval())\n\n def testAllCorrect(self):\n predictions = array_ops.zeros([40])\n labels = array_ops.zeros([40])\n num_classes = 1\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n self.assertEqual(40, update_op.eval()[0])\n self.assertEqual(1.0, miou.eval())\n\n def testAllWrong(self):\n predictions = array_ops.zeros([40])\n labels = array_ops.ones([40])\n num_classes = 2\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())\n self.assertEqual(0., miou.eval())\n\n def testResultsWithSomeMissing(self):\n predictions = array_ops.concat([\n constant_op.constant(0, shape=[5]),\n constant_op.constant(1, shape=[5])\n ], 0)\n labels = array_ops.concat([\n constant_op.constant(0, shape=[3]),\n constant_op.constant(1, shape=[7])\n ], 0)\n num_classes = 2\n weights = array_ops.concat([\n constant_op.constant(0, shape=[1]),\n constant_op.constant(1, shape=[8]),\n constant_op.constant(0, shape=[1])\n ], 0)\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(\n predictions, labels, num_classes, weights=weights)\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())\n desired_miou = np.mean([2. / 4., 4. / 6.])\n self.assertAlmostEqual(desired_miou, miou.eval())\n\n def testMissingClassInLabels(self):\n labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],\n [[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])\n predictions = constant_op.constant(\n [[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],\n [1, 1, 2, 0, 0, 0]]])\n num_classes = 3\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())\n self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /\n (0 + 5 + 0)), miou.eval())\n\n def testMissingClassOverallSmall(self):\n labels = constant_op.constant([0])\n predictions = constant_op.constant([0])\n num_classes = 2\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())\n self.assertAlmostEqual(1, miou.eval())\n\n def testMissingClassOverallLarge(self):\n labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],\n [[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])\n predictions = constant_op.constant(\n [[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 0]]])\n num_classes = 3\n with self.test_session() as sess:\n miou, update_op = metrics.streaming_mean_iou(predictions, labels,\n num_classes)\n sess.run(variables.local_variables_initializer())\n self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())\n self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),\n miou.eval())\n\n\nclass StreamingConcatTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.streaming_concat(values=array_ops.ones((10,)))\n _assert_metric_variables(self, (\n 'streaming_concat/array:0',\n 
'streaming_concat/size:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n value, _ = metrics.streaming_concat(\n values=array_ops.ones((10,)), metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [value])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.streaming_concat(\n values=array_ops.ones((10,)), updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testNextArraySize(self):\n next_array_size = metric_ops._next_array_size # pylint: disable=protected-access\n with self.test_session():\n self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)\n self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)\n self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)\n self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)\n self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)\n\n def testStreamingConcat(self):\n with self.test_session() as sess:\n values = array_ops.placeholder(dtypes_lib.int32, [None])\n concatenated, update_op = metrics.streaming_concat(values)\n sess.run(variables.local_variables_initializer())\n\n self.assertAllEqual([], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: [0, 1, 2]})\n self.assertAllEqual([0, 1, 2], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: [3, 4]})\n self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})\n self.assertAllEqual(np.arange(10), concatenated.eval())\n\n def testStreamingConcatStringValues(self):\n with self.test_session() as sess:\n values = array_ops.placeholder(dtypes_lib.string, [None])\n concatenated, update_op = metrics.streaming_concat(values)\n sess.run(variables.local_variables_initializer())\n\n self.assertItemsEqual([], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})\n self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: ['d', 'e']})\n self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})\n self.assertItemsEqual(\n [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],\n concatenated.eval())\n\n def testStreamingConcatMaxSize(self):\n with self.test_session() as sess:\n values = math_ops.range(3)\n concatenated, update_op = metrics.streaming_concat(values, max_size=5)\n sess.run(variables.local_variables_initializer())\n\n self.assertAllEqual([], concatenated.eval())\n\n sess.run([update_op])\n self.assertAllEqual([0, 1, 2], concatenated.eval())\n\n sess.run([update_op])\n self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())\n\n sess.run([update_op])\n self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())\n\n def testStreamingConcat2D(self):\n with self.test_session() as sess:\n values = array_ops.reshape(math_ops.range(3), (3, 1))\n concatenated, update_op = metrics.streaming_concat(values, axis=-1)\n sess.run(variables.local_variables_initializer())\n for _ in range(10):\n sess.run([update_op])\n self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())\n\n def testStreamingConcatErrors(self):\n with self.assertRaises(ValueError):\n metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))\n\n values = array_ops.zeros((2, 3))\n with 
self.assertRaises(ValueError):\n metrics.streaming_concat(values, axis=-3, max_size=3)\n with self.assertRaises(ValueError):\n metrics.streaming_concat(values, axis=2, max_size=3)\n\n with self.assertRaises(ValueError):\n metrics.streaming_concat(\n array_ops.placeholder(dtypes_lib.float32, [None, None]))\n\n def testStreamingConcatReset(self):\n with self.test_session() as sess:\n values = array_ops.placeholder(dtypes_lib.int32, [None])\n concatenated, update_op = metrics.streaming_concat(values)\n sess.run(variables.local_variables_initializer())\n\n self.assertAllEqual([], concatenated.eval())\n\n sess.run([update_op], feed_dict={values: [0, 1, 2]})\n self.assertAllEqual([0, 1, 2], concatenated.eval())\n\n sess.run(variables.local_variables_initializer())\n\n sess.run([update_op], feed_dict={values: [3, 4]})\n self.assertAllEqual([3, 4], concatenated.eval())\n\n\nclass AggregateMetricsTest(test.TestCase):\n\n def testAggregateNoMetricsRaisesValueError(self):\n with self.assertRaises(ValueError):\n metrics.aggregate_metrics()\n\n def testAggregateSingleMetricReturnsOneItemLists(self):\n values = array_ops.ones((10, 4))\n value_tensors, update_ops = metrics.aggregate_metrics(\n metrics.streaming_mean(values))\n self.assertEqual(len(value_tensors), 1)\n self.assertEqual(len(update_ops), 1)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(1, update_ops[0].eval())\n self.assertEqual(1, value_tensors[0].eval())\n\n def testAggregateMultipleMetricsReturnsListsInOrder(self):\n predictions = array_ops.ones((10, 4))\n labels = array_ops.ones((10, 4)) * 3\n value_tensors, update_ops = metrics.aggregate_metrics(\n metrics.streaming_mean_absolute_error(predictions, labels),\n metrics.streaming_mean_squared_error(predictions, labels))\n self.assertEqual(len(value_tensors), 2)\n self.assertEqual(len(update_ops), 2)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(2, update_ops[0].eval())\n self.assertEqual(4, update_ops[1].eval())\n self.assertEqual(2, value_tensors[0].eval())\n self.assertEqual(4, value_tensors[1].eval())\n\n\nclass AggregateMetricMapTest(test.TestCase):\n\n def testAggregateMultipleMetricsReturnsListsInOrder(self):\n predictions = array_ops.ones((10, 4))\n labels = array_ops.ones((10, 4)) * 3\n names_to_values, names_to_updates = metrics.aggregate_metric_map({\n 'm1': metrics.streaming_mean_absolute_error(predictions, labels),\n 'm2': metrics.streaming_mean_squared_error(predictions, labels),\n })\n\n self.assertEqual(2, len(names_to_values))\n self.assertEqual(2, len(names_to_updates))\n\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n self.assertEqual(2, names_to_updates['m1'].eval())\n self.assertEqual(4, names_to_updates['m2'].eval())\n self.assertEqual(2, names_to_values['m1'].eval())\n self.assertEqual(4, names_to_values['m2'].eval())\n\n\nclass CountTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.count(array_ops.ones([4, 3]))\n _assert_metric_variables(self, ['count/count:0'])\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n mean, _ = metrics.count(\n array_ops.ones([4, 3]), metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [mean])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.count(\n array_ops.ones([4, 3]), 
updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testReturnType(self):\n c, op = metrics.count(array_ops.ones([4, 3]))\n self.assertTrue(isinstance(c, ops.Tensor))\n self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))\n\n def testBasic(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n result, update_op = metrics.count(values)\n\n sess.run(variables.local_variables_initializer())\n for _ in range(4):\n sess.run(update_op)\n self.assertAlmostEqual(8.0, sess.run(result), 5)\n\n def testUpdateOpsReturnsCurrentValue(self):\n with self.test_session() as sess:\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n result, update_op = metrics.count(values)\n\n sess.run(variables.local_variables_initializer())\n\n self.assertAlmostEqual(2.0, sess.run(update_op), 5)\n self.assertAlmostEqual(4.0, sess.run(update_op), 5)\n self.assertAlmostEqual(6.0, sess.run(update_op), 5)\n self.assertAlmostEqual(8.0, sess.run(update_op), 5)\n\n self.assertAlmostEqual(8.0, sess.run(result), 5)\n\n def test1dWeightedValues(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 1))\n _enqueue_vector(sess, weights_queue, [0.5])\n _enqueue_vector(sess, weights_queue, [0])\n _enqueue_vector(sess, weights_queue, [0])\n _enqueue_vector(sess, weights_queue, [1.2])\n weights = weights_queue.dequeue()\n\n result, update_op = metrics.count(values, weights)\n\n variables.local_variables_initializer().run()\n for _ in range(4):\n update_op.eval()\n self.assertAlmostEqual(3.4, result.eval(), 5)\n\n def test1dWeightedValues_placeholders(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))\n values = array_ops.placeholder(dtype=dtypes_lib.float32)\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1,))\n _enqueue_vector(sess, weights_queue, 0.5, shape=(1,))\n _enqueue_vector(sess, weights_queue, 0, shape=(1,))\n _enqueue_vector(sess, weights_queue, 0, shape=(1,))\n _enqueue_vector(sess, weights_queue, 1.2, shape=(1,))\n weights = weights_queue.dequeue()\n\n result, update_op = metrics.count(values, weights)\n\n variables.local_variables_initializer().run()\n for i in range(4):\n update_op.eval(feed_dict={values: feed_values[i]})\n self.assertAlmostEqual(3.4, result.eval(), 5)\n\n 
def test2dWeightedValues(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n values_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, values_queue, [0, 1])\n _enqueue_vector(sess, values_queue, [-4.2, 9.1])\n _enqueue_vector(sess, values_queue, [6.5, 0])\n _enqueue_vector(sess, values_queue, [-3.2, 4.0])\n values = values_queue.dequeue()\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(1, 2))\n _enqueue_vector(sess, weights_queue, [1.1, 1])\n _enqueue_vector(sess, weights_queue, [1, 0])\n _enqueue_vector(sess, weights_queue, [0, 1])\n _enqueue_vector(sess, weights_queue, [0, 0])\n weights = weights_queue.dequeue()\n\n result, update_op = metrics.count(values, weights)\n\n variables.local_variables_initializer().run()\n for _ in range(4):\n update_op.eval()\n self.assertAlmostEqual(4.1, result.eval(), 5)\n\n def test2dWeightedValues_placeholders(self):\n with self.test_session() as sess:\n # Create the queue that populates the values.\n feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))\n values = array_ops.placeholder(dtype=dtypes_lib.float32)\n\n # Create the queue that populates the weighted labels.\n weights_queue = data_flow_ops.FIFOQueue(\n 4, dtypes=dtypes_lib.float32, shapes=(2,))\n _enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))\n _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))\n _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))\n _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))\n weights = weights_queue.dequeue()\n\n result, update_op = metrics.count(values, weights)\n\n variables.local_variables_initializer().run()\n for i in range(4):\n update_op.eval(feed_dict={values: feed_values[i]})\n self.assertAlmostEqual(4.1, result.eval(), 5)\n\n\nclass CohenKappaTest(test.TestCase):\n\n def _confusion_matrix_to_samples(self, confusion_matrix):\n x, y = confusion_matrix.shape\n pairs = []\n for label in range(x):\n for feature in range(y):\n pairs += [label, feature] * confusion_matrix[label, feature]\n pairs = np.array(pairs).reshape((-1, 2))\n return pairs[:, 0], pairs[:, 1]\n\n def setUp(self):\n np.random.seed(1)\n ops.reset_default_graph()\n\n def testVars(self):\n metrics.cohen_kappa(\n predictions_idx=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n num_classes=2)\n _assert_metric_variables(self, (\n 'cohen_kappa/po:0',\n 'cohen_kappa/pe_row:0',\n 'cohen_kappa/pe_col:0',\n ))\n\n def testMetricsCollection(self):\n my_collection_name = '__metrics__'\n kappa, _ = metrics.cohen_kappa(\n predictions_idx=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n num_classes=2,\n metrics_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [kappa])\n\n def testUpdatesCollection(self):\n my_collection_name = '__updates__'\n _, update_op = metrics.cohen_kappa(\n predictions_idx=array_ops.ones((10, 1)),\n labels=array_ops.ones((10, 1)),\n num_classes=2,\n updates_collections=[my_collection_name])\n self.assertListEqual(ops.get_collection(my_collection_name), [update_op])\n\n def testValueTensorIsIdempotent(self):\n predictions = random_ops.random_uniform(\n (10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)\n labels = random_ops.random_uniform(\n (10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)\n kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)\n\n with self.test_session() as sess:\n 
sess.run(variables.local_variables_initializer())\n\n # Run several updates.\n for _ in range(10):\n sess.run(update_op)\n\n # Then verify idempotency.\n initial_kappa = kappa.eval()\n for _ in range(10):\n self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)\n\n def testBasic(self):\n confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])\n # overall total = 36\n # po = [9, 8, 6], sum(po) = 23\n # pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]\n # finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))\n # = (23 - 12.34) / (36 - 12.34)\n # = 0.45\n # see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF\n expect = 0.45\n labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)\n\n dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]\n shapes = [\n (len(labels,)), # 1-dim\n (len(labels), 1)\n ] # 2-dim\n weights = [None, np.ones_like(labels)]\n\n for dtype in dtypes:\n for shape in shapes:\n for weight in weights:\n with self.test_session() as sess:\n predictions_tensor = constant_op.constant(\n np.reshape(predictions, shape), dtype=dtype)\n labels_tensor = constant_op.constant(\n np.reshape(labels, shape), dtype=dtype)\n kappa, update_op = metrics.cohen_kappa(\n labels_tensor, predictions_tensor, 3, weights=weight)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expect, sess.run(update_op), 2)\n self.assertAlmostEqual(expect, kappa.eval(), 2)\n\n def testAllCorrect(self):\n inputs = np.arange(0, 100) % 4\n # confusion matrix\n # [[25, 0, 0],\n # [0, 25, 0],\n # [0, 0, 25]]\n # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)\n expect = 1.0\n\n with self.test_session() as sess:\n predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)\n labels = constant_op.constant(inputs)\n kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expect, sess.run(update_op), 5)\n self.assertAlmostEqual(expect, kappa.eval(), 5)\n\n def testAllIncorrect(self):\n labels = np.arange(0, 100) % 4\n predictions = (labels + 1) % 4\n # confusion matrix\n # [[0, 25, 0],\n # [0, 0, 25],\n # [25, 0, 0]]\n # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)\n expect = -0.333333333333\n\n with self.test_session() as sess:\n predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels)\n kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expect, sess.run(update_op), 5)\n self.assertAlmostEqual(expect, kappa.eval(), 5)\n\n def testWeighted(self):\n confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])\n labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)\n num_samples = np.sum(confusion_matrix, dtype=np.int32)\n weights = (np.arange(0, num_samples) % 5) / 5.0\n # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(\n # labels, predictions, sample_weight=weights)\n expect = 0.453466583385\n\n with self.test_session() as sess:\n predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)\n labels = constant_op.constant(labels)\n kappa, update_op = metrics.cohen_kappa(\n labels, predictions, 4, weights=weights)\n\n sess.run(variables.local_variables_initializer())\n self.assertAlmostEqual(expect, sess.run(update_op), 5)\n self.assertAlmostEqual(expect, kappa.eval(), 5)\n\n def 
testWithMultipleUpdates(self):\n confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],\n [20, 10, 60, 35], [15, 25, 30, 25]])\n labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)\n num_samples = np.sum(confusion_matrix, dtype=np.int32)\n weights = (np.arange(0, num_samples) % 5) / 5.0\n num_classes = confusion_matrix.shape[0]\n\n batch_size = num_samples // 10\n predictions_t = array_ops.placeholder(\n dtypes_lib.float32, shape=(batch_size,))\n labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))\n weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))\n kappa, update_op = metrics.cohen_kappa(\n labels_t, predictions_t, num_classes, weights=weights_t)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n\n for idx in range(0, num_samples, batch_size):\n batch_start, batch_end = idx, idx + batch_size\n sess.run(\n update_op,\n feed_dict={\n labels_t: labels[batch_start:batch_end],\n predictions_t: predictions[batch_start:batch_end],\n weights_t: weights[batch_start:batch_end]\n })\n # Calculated by v0.19: sklearn.metrics.cohen_kappa_score(\n # labels_np, predictions_np, sample_weight=weights_np)\n expect = 0.289965397924\n self.assertAlmostEqual(expect, kappa.eval(), 5)\n\n def testInvalidNumClasses(self):\n predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))\n labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))\n with self.assertRaisesRegexp(ValueError, 'num_classes'):\n metrics.cohen_kappa(labels, predictions, 1)\n\n def testInvalidDimension(self):\n predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))\n invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))\n with self.assertRaises(ValueError):\n metrics.cohen_kappa(invalid_labels, predictions, 3)\n\n invalid_predictions = array_ops.placeholder(\n dtypes_lib.float32, shape=(4, 2))\n labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))\n with self.assertRaises(ValueError):\n metrics.cohen_kappa(labels, invalid_predictions, 3)\n\n def testConditionalPackingOptimization(self):\n placeholder = array_ops.placeholder(dtypes_lib.float32, [None])\n values, update_op = metric_ops.streaming_concat(placeholder)\n with self.test_session() as sess:\n sess.run(variables.local_variables_initializer())\n for feed in range(10):\n sess.run(update_op, feed_dict={placeholder: [feed]})\n print(sess.run(values))\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "numpy.diag", "tensorflow.contrib.distributions.python.ops.distribution_util.pad_mixture_dimensions", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "numpy.asarray", "tensorflow.python.ops.distributions.normal.Normal", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.contrib.distributions.python.ops.distribution_util.make_tril_scale", "tensorflow.contrib.distributions.python.ops.distribution_util.move_dimension", "numpy.tril", "tensorflow.contrib.distributions.python.ops.distribution_util.shapes_from_loc_and_scale", "numpy.reshape", "tensorflow.contrib.distributions.python.ops.distribution_util.tridiag", "tensorflow.contrib.distributions.python.ops.distribution_util.pad", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "numpy.float32", "tensorflow.contrib.distributions.python.ops.distribution_util.make_diag_scale", "numpy.zeros", "tensorflow.python.ops.distributions.categorical.Categorical", "tensorflow.python.ops.linalg.linear_operator_diag.LinearOperatorDiag", "tensorflow.contrib.distributions.python.ops.mvn_diag.MultivariateNormalDiag", "tensorflow.contrib.distributions.python.ops.distribution_util.get_broadcast_shape", "numpy.array", "numpy.int32", "numpy.ones", "tensorflow.contrib.distributions.python.ops.distribution_util.mixture_stddev", "tensorflow.python.ops.random_ops.random_normal", "numpy.prod", "tensorflow.python.ops.array_ops.placeholder_with_default", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.autograph.pyct.anno.hasanno", "tensorflow.contrib.autograph.pyct.ast_util.rename_symbols", "tensorflow.contrib.autograph.pyct.parser.parse_expression", "tensorflow.contrib.autograph.pyct.templates.replace", "tensorflow.contrib.autograph.pyct.anno.getanno", "tensorflow.contrib.autograph.pyct.templates.replace_as_expression" ], [ "numpy.matrix", "numpy.sqrt", "numpy.asarray", "tensorflow.contrib.metrics.python.ops.metric_ops.auc_with_confidence_intervals", "numpy.cumsum", "tensorflow.python.ops.array_ops.placeholder", "numpy.concatenate", "tensorflow.python.ops.array_ops.zeros", "numpy.random.randn", "numpy.mean", "numpy.random.randint", "numpy.ones_like", "tensorflow.contrib.metrics.python.ops.metric_ops.streaming_sparse_average_precision_at_top_k", "tensorflow.python.framework.ops.get_collection", "numpy.arange", "numpy.reshape", "numpy.size", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.variables.local_variables", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.ops.math_ops.cast", "numpy.isnan", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.reduce_mean", "numpy.cov", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.corrcoef", "numpy.argsort", "numpy.array", "tensorflow.python.ops.data_flow_ops.FIFOQueue", "numpy.sum", "tensorflow.python.ops.variables.variables_initializer", "tensorflow.python.ops.math_ops.range", "numpy.absolute", "numpy.random.seed", "tensorflow.contrib.metrics.python.ops.metric_ops.streaming_curve_points", "numpy.random.exponential", "tensorflow.python.framework.ops.Graph", "numpy.random.shuffle", "numpy.ones", "tensorflow.python.ops.array_ops.reshape", "numpy.random.normal", "tensorflow.contrib.metrics.python.ops.metric_ops.streaming_concat", "tensorflow.python.ops.random_ops.random_normal", 
"tensorflow.contrib.metrics.python.ops.metric_ops.precision_recall_at_equal_thresholds", "numpy.random.uniform", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] } ]
okojoalg/Creative-Adversarial-Networks
[ "7f06f395b9f317f9235dc8c60c7b385cd6530471", "7f06f395b9f317f9235dc8c60c7b385cd6530471", "7f06f395b9f317f9235dc8c60c7b385cd6530471" ]
[ "slim/nets/inception_resnet_v2_test.py", "slim/nets/dcgan.py", "slim/nets/nets_factory_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.inception_resnet_v2.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import inception\n\n\nclass InceptionTest(tf.test.TestCase):\n\n def testBuildLogits(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, endpoints = inception.inception_resnet_v2(inputs, num_classes)\n self.assertTrue('AuxLogits' in endpoints)\n auxlogits = endpoints['AuxLogits']\n self.assertTrue(\n auxlogits.op.name.startswith('InceptionResnetV2/AuxLogits'))\n self.assertListEqual(auxlogits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildWithoutAuxLogits(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, endpoints = inception.inception_resnet_v2(inputs, num_classes,\n create_aux_logits=False)\n self.assertTrue('AuxLogits' not in endpoints)\n self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildNoClasses(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = None\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, endpoints = inception.inception_resnet_v2(inputs, num_classes)\n self.assertTrue('AuxLogits' not in endpoints)\n self.assertTrue('Logits' not in endpoints)\n self.assertTrue(\n net.op.name.startswith('InceptionResnetV2/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])\n\n def testBuildEndPoints(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_resnet_v2(inputs, num_classes)\n self.assertTrue('Logits' in end_points)\n logits = end_points['Logits']\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue('AuxLogits' in end_points)\n aux_logits = end_points['AuxLogits']\n self.assertListEqual(aux_logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_7b_1x1']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 8, 8, 1536])\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 299, 299\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, end_points = inception.inception_resnet_v2_base(inputs)\n 
self.assertTrue(net.op.name.startswith('InceptionResnetV2/Conv2d_7b_1x1'))\n self.assertListEqual(net.get_shape().as_list(),\n [batch_size, 8, 8, 1536])\n expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',\n 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',\n 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',\n 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildOnlyUptoFinalEndpoint(self):\n batch_size = 5\n height, width = 299, 299\n endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',\n 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',\n 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_6a',\n 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']\n for index, endpoint in enumerate(endpoints):\n with tf.Graph().as_default():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n out_tensor, end_points = inception.inception_resnet_v2_base(\n inputs, final_endpoint=endpoint)\n if endpoint != 'PreAuxLogits':\n self.assertTrue(out_tensor.op.name.startswith(\n 'InceptionResnetV2/' + endpoint))\n self.assertItemsEqual(endpoints[:index+1], end_points)\n\n def testBuildAndCheckAllEndPointsUptoPreAuxLogits(self):\n batch_size = 5\n height, width = 299, 299\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_resnet_v2_base(\n inputs, final_endpoint='PreAuxLogits')\n endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],\n 'Conv2d_2a_3x3': [5, 147, 147, 32],\n 'Conv2d_2b_3x3': [5, 147, 147, 64],\n 'MaxPool_3a_3x3': [5, 73, 73, 64],\n 'Conv2d_3b_1x1': [5, 73, 73, 80],\n 'Conv2d_4a_3x3': [5, 71, 71, 192],\n 'MaxPool_5a_3x3': [5, 35, 35, 192],\n 'Mixed_5b': [5, 35, 35, 320],\n 'Mixed_6a': [5, 17, 17, 1088],\n 'PreAuxLogits': [5, 17, 17, 1088]\n }\n\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithAlignedFeatureMaps(self):\n batch_size = 5\n height, width = 299, 299\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_resnet_v2_base(\n inputs, final_endpoint='PreAuxLogits', align_feature_maps=True)\n endpoints_shapes = {'Conv2d_1a_3x3': [5, 150, 150, 32],\n 'Conv2d_2a_3x3': [5, 150, 150, 32],\n 'Conv2d_2b_3x3': [5, 150, 150, 64],\n 'MaxPool_3a_3x3': [5, 75, 75, 64],\n 'Conv2d_3b_1x1': [5, 75, 75, 80],\n 'Conv2d_4a_3x3': [5, 75, 75, 192],\n 'MaxPool_5a_3x3': [5, 38, 38, 192],\n 'Mixed_5b': [5, 38, 38, 320],\n 'Mixed_6a': [5, 19, 19, 1088],\n 'PreAuxLogits': [5, 19, 19, 1088]\n }\n\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testBuildAndCheckAllEndPointsUptoPreAuxLogitsWithOutputStrideEight(self):\n batch_size = 5\n height, width = 299, 299\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_resnet_v2_base(\n inputs, final_endpoint='PreAuxLogits', output_stride=8)\n endpoints_shapes = {'Conv2d_1a_3x3': [5, 149, 149, 32],\n 'Conv2d_2a_3x3': [5, 147, 147, 32],\n 'Conv2d_2b_3x3': [5, 147, 147, 64],\n 'MaxPool_3a_3x3': [5, 
73, 73, 64],\n 'Conv2d_3b_1x1': [5, 73, 73, 80],\n 'Conv2d_4a_3x3': [5, 71, 71, 192],\n 'MaxPool_5a_3x3': [5, 35, 35, 192],\n 'Mixed_5b': [5, 35, 35, 320],\n 'Mixed_6a': [5, 33, 33, 1088],\n 'PreAuxLogits': [5, 33, 33, 1088]\n }\n\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testVariablesSetDevice(self):\n batch_size = 5\n height, width = 299, 299\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n # Force all Variables to reside on the device.\n with tf.compat.v1.variable_scope('on_cpu'), tf.device('/cpu:0'):\n inception.inception_resnet_v2(inputs, num_classes)\n with tf.compat.v1.variable_scope('on_gpu'), tf.device('/gpu:0'):\n inception.inception_resnet_v2(inputs, num_classes)\n for v in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):\n self.assertDeviceEqual(v.device, '/cpu:0')\n for v in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):\n self.assertDeviceEqual(v.device, '/gpu:0')\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 150, 150\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_resnet_v2(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_7b_1x1']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 3, 3, 1536])\n\n def testGlobalPool(self):\n batch_size = 2\n height, width = 400, 600\n num_classes = 1000\n with self.test_session():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_resnet_v2(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_7b_1x1']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 11, 17, 1536])\n\n def testGlobalPoolUnknownImageShape(self):\n batch_size = 2\n height, width = 400, 600\n num_classes = 1000\n with self.test_session() as sess:\n inputs = tf.compat.v1.placeholder(tf.float32, (batch_size, None, None, 3))\n logits, end_points = inception.inception_resnet_v2(\n inputs, num_classes, create_aux_logits=False)\n self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_7b_1x1']\n images = tf.random.uniform((batch_size, height, width, 3))\n sess.run(tf.compat.v1.global_variables_initializer())\n logits_out, pre_pool_out = sess.run([logits, pre_pool],\n {inputs: images.eval()})\n self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))\n self.assertTupleEqual(pre_pool_out.shape, (batch_size, 11, 17, 1536))\n\n def testUnknownBatchSize(self):\n batch_size = 1\n height, width = 299, 299\n num_classes = 1000\n with self.test_session() as sess:\n inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))\n logits, _ = inception.inception_resnet_v2(inputs, num_classes)\n 
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, num_classes])\n images = tf.random.uniform((batch_size, height, width, 3))\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEquals(output.shape, (batch_size, num_classes))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 299, 299\n num_classes = 1000\n with self.test_session() as sess:\n eval_inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, _ = inception.inception_resnet_v2(eval_inputs,\n num_classes,\n is_training=False)\n predictions = tf.argmax(input=logits, axis=1)\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (batch_size,))\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 5\n eval_batch_size = 2\n height, width = 150, 150\n num_classes = 1000\n with self.test_session() as sess:\n train_inputs = tf.random.uniform((train_batch_size, height, width, 3))\n inception.inception_resnet_v2(train_inputs, num_classes)\n eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))\n logits, _ = inception.inception_resnet_v2(eval_inputs,\n num_classes,\n is_training=False,\n reuse=True)\n predictions = tf.argmax(input=logits, axis=1)\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (eval_batch_size,))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom math import log\n\nimport tensorflow as tf\nimport tf_slim as slim\n\n\ndef _validate_image_inputs(inputs):\n inputs.get_shape().assert_has_rank(4)\n inputs.get_shape()[1:3].assert_is_fully_defined()\n if inputs.get_shape()[1] != inputs.get_shape()[2]:\n raise ValueError('Input tensor does not have equal width and height: ',\n inputs.get_shape()[1:3])\n width = inputs.get_shape().as_list()[1]\n if log(width, 2) != int(log(width, 2)):\n raise ValueError('Input tensor `width` is not a power of 2: ', width)\n\n\n# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN\n# setups need the gradient of gradient FusedBatchNormGrad.\ndef discriminator(inputs,\n depth=64,\n is_training=True,\n reuse=None,\n scope='Discriminator',\n fused_batch_norm=False):\n \"\"\"Discriminator network for DCGAN.\n\n Construct discriminator network from inputs to the final endpoint.\n\n Args:\n inputs: A tensor of size [batch_size, height, width, channels]. 
Must be\n floating point.\n depth: Number of channels in first convolution layer.\n is_training: Whether the network is for training or not.\n reuse: Whether or not the network variables should be reused. `scope`\n must be given to be reused.\n scope: Optional variable_scope.\n fused_batch_norm: If `True`, use a faster, fused implementation of\n batch norm.\n\n Returns:\n logits: The pre-softmax activations, a tensor of size [batch_size, 1]\n end_points: a dictionary from components of the network to their activation.\n\n Raises:\n ValueError: If the input image shape is not 4-dimensional, if the spatial\n dimensions aren't defined at graph construction time, if the spatial\n dimensions aren't square, or if the spatial dimensions aren't a power of\n two.\n \"\"\"\n\n normalizer_fn = slim.batch_norm\n normalizer_fn_args = {\n 'is_training': is_training,\n 'zero_debias_moving_mean': True,\n 'fused': fused_batch_norm,\n }\n\n _validate_image_inputs(inputs)\n inp_shape = inputs.get_shape().as_list()[1]\n\n end_points = {}\n with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope:\n with slim.arg_scope([normalizer_fn], **normalizer_fn_args):\n with slim.arg_scope([slim.conv2d],\n stride=2,\n kernel_size=4,\n activation_fn=tf.nn.leaky_relu):\n net = inputs\n for i in xrange(int(log(inp_shape, 2))):\n scope = 'conv%i' % (i + 1)\n current_depth = depth * 2**i\n normalizer_fn_ = None if i == 0 else normalizer_fn\n net = slim.conv2d(\n net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)\n end_points[scope] = net\n\n logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',\n normalizer_fn=None, activation_fn=None)\n logits = tf.reshape(logits, [-1, 1])\n end_points['logits'] = logits\n\n return logits, end_points\n\n\n# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN\n# setups need the gradient of gradient FusedBatchNormGrad.\ndef generator(inputs,\n depth=64,\n final_size=32,\n num_outputs=3,\n is_training=True,\n reuse=None,\n scope='Generator',\n fused_batch_norm=False):\n \"\"\"Generator network for DCGAN.\n\n Construct generator network from inputs to the final endpoint.\n\n Args:\n inputs: A tensor with any size N. [batch_size, N]\n depth: Number of channels in last deconvolution layer.\n final_size: The shape of the final output.\n num_outputs: Number of output features. For images, this is the number of\n channels.\n is_training: whether is training or not.\n reuse: Whether or not the network has its variables should be reused. scope\n must be given to be reused.\n scope: Optional variable_scope.\n fused_batch_norm: If `True`, use a faster, fused implementation of\n batch norm.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, 32, 32, channels]\n end_points: a dictionary from components of the network to their activation.\n\n Raises:\n ValueError: If `inputs` is not 2-dimensional.\n ValueError: If `final_size` isn't a power of 2 or is less than 8.\n \"\"\"\n normalizer_fn = slim.batch_norm\n normalizer_fn_args = {\n 'is_training': is_training,\n 'zero_debias_moving_mean': True,\n 'fused': fused_batch_norm,\n }\n\n inputs.get_shape().assert_has_rank(2)\n if log(final_size, 2) != int(log(final_size, 2)):\n raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)\n if final_size < 8:\n raise ValueError('`final_size` (%i) must be greater than 8.' 
% final_size)\n\n end_points = {}\n num_layers = int(log(final_size, 2)) - 1\n with tf.compat.v1.variable_scope(scope, values=[inputs], reuse=reuse) as scope:\n with slim.arg_scope([normalizer_fn], **normalizer_fn_args):\n with slim.arg_scope([slim.conv2d_transpose],\n normalizer_fn=normalizer_fn,\n stride=2,\n kernel_size=4):\n net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)\n\n # First upscaling is different because it takes the input vector.\n current_depth = depth * 2 ** (num_layers - 1)\n scope = 'deconv1'\n net = slim.conv2d_transpose(\n net, current_depth, stride=1, padding='VALID', scope=scope)\n end_points[scope] = net\n\n for i in xrange(2, num_layers):\n scope = 'deconv%i' % (i)\n current_depth = depth * 2 ** (num_layers - i)\n net = slim.conv2d_transpose(net, current_depth, scope=scope)\n end_points[scope] = net\n\n # Last layer has different normalizer and activation.\n scope = 'deconv%i' % (num_layers)\n net = slim.conv2d_transpose(\n net, depth, normalizer_fn=None, activation_fn=None, scope=scope)\n end_points[scope] = net\n\n # Convert to proper channels.\n scope = 'logits'\n logits = slim.conv2d(\n net,\n num_outputs,\n normalizer_fn=None,\n activation_fn=None,\n kernel_size=1,\n stride=1,\n padding='VALID',\n scope=scope)\n end_points[scope] = logits\n\n logits.get_shape().assert_has_rank(4)\n logits.get_shape().assert_is_compatible_with(\n [None, final_size, final_size, num_outputs])\n\n return logits, end_points\n", "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for slim.inception.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\nfrom nets import nets_factory\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def testGetNetworkFnFirstHalf(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map.keys()[:10]:\n with tf.Graph().as_default() as g, self.test_session(g):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random.uniform((batch_size, image_size, image_size, 3))\n logits, end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\n def testGetNetworkFnSecondHalf(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map.keys()[10:]:\n with tf.Graph().as_default() as g, self.test_session(g):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random.uniform((batch_size, image_size, image_size, 3))\n logits, 
end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.device", "tensorflow.Graph", "tensorflow.random.uniform", "tensorflow.test.main", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.placeholder", "tensorflow.argmax", "tensorflow.compat.v1.variable_scope" ], [ "tensorflow.reshape", "tensorflow.compat.v1.variable_scope", "tensorflow.expand_dims" ], [ "tensorflow.Graph", "tensorflow.random.uniform", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kashif/pyro
[ "b65b329d8b851c7402acaef9c176a8964caadaf3", "b65b329d8b851c7402acaef9c176a8964caadaf3" ]
[ "pyro/distributions/spanning_tree.py", "pyro/distributions/transforms/ordered.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nimport math\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import lazy_property\n\nfrom pyro.distributions.torch_distribution import TorchDistribution\n\n\nclass SpanningTree(TorchDistribution):\n \"\"\"\n Distribution over spanning trees on a fixed number ``V`` of vertices.\n\n A tree is represented as :class:`torch.LongTensor` ``edges`` of shape\n ``(V-1,2)`` satisfying the following properties:\n\n 1. The edges constitute a tree, i.e. are connected and cycle free.\n 2. Each edge ``(v1,v2) = edges[e]`` is sorted, i.e. ``v1 < v2``.\n 3. The entire tensor is sorted in colexicographic order.\n\n Use :func:`validate_edges` to verify `edges` are correctly formed.\n\n The ``edge_logits`` tensor has one entry for each of the ``V*(V-1)//2``\n edges in the complete graph on ``V`` vertices, where edges are each sorted\n and the edge order is colexicographic::\n\n (0,1), (0,2), (1,2), (0,3), (1,3), (2,3), (0,4), (1,4), (2,4), ...\n\n This ordering corresponds to the size-independent pairing function::\n\n k = v1 + v2 * (v2 - 1) // 2\n\n where ``k`` is the rank of the edge ``(v1,v2)`` in the complete graph.\n To convert a matrix of edge logits to the linear representation used here::\n\n assert my_matrix.shape == (V, V)\n i, j = make_complete_graph(V)\n edge_logits = my_matrix[i, j]\n\n :param torch.Tensor edge_logits: A tensor of length ``V*(V-1)//2``\n containing logits (aka negative energies) of all edges in the complete\n graph on ``V`` vertices. See above comment for edge ordering.\n :param dict sampler_options: An optional dict of sampler options including:\n ``mcmc_steps`` defaulting to a single MCMC step (which is pretty good);\n ``initial_edges`` defaulting to a cheap approximate sample;\n ``backend`` one of \"python\" or \"cpp\", defaulting to \"python\".\n \"\"\"\n arg_constraints = {'edge_logits': constraints.real}\n support = constraints.nonnegative_integer\n has_enumerate_support = True\n\n def __init__(self, edge_logits, sampler_options=None, validate_args=None):\n if edge_logits.is_cuda:\n raise NotImplementedError(\"SpanningTree does not support cuda tensors\")\n K = len(edge_logits)\n V = int(round(0.5 + (0.25 + 2 * K)**0.5))\n assert K == V * (V - 1) // 2\n E = V - 1\n event_shape = (E, 2)\n batch_shape = ()\n self.edge_logits = edge_logits\n super().__init__(batch_shape, event_shape, validate_args=validate_args)\n if self._validate_args:\n if edge_logits.shape != (K,):\n raise ValueError(\"Expected edge_logits of shape ({},), but got shape {}\"\n .format(K, edge_logits.shape))\n self.num_vertices = V\n self.sampler_options = {} if sampler_options is None else sampler_options\n\n def validate_edges(self, edges):\n \"\"\"\n Validates a batch of ``edges`` tensors, as returned by :meth:`sample` or\n :meth:`enumerate_support` or as input to :meth:`log_prob()`.\n\n :param torch.LongTensor edges: A batch of edges.\n :raises: ValueError\n :returns: None\n \"\"\"\n if edges.shape[-2:] != self.event_shape:\n raise ValueError(\"Invalid edges shape: {}\".format(edges.shape))\n\n # Verify canonical ordering.\n if not ((0 <= edges) & (edges < self.num_vertices)).all():\n raise ValueError(\"Invalid vertex ids:\\n{}\".format(edges))\n if not (edges[..., 0] < edges[..., 1]).all():\n raise ValueError(\"Vertices are not sorted in each edge:\\n{}\".format(edges))\n if not ((edges[..., :-1, 1] < edges[..., 1:, 1]) |\n ((edges[..., :-1, 1] == 
edges[..., 1:, 1]) &\n (edges[..., :-1, 0] < edges[..., 1:, 0]))).all():\n raise ValueError(\"Edges are not sorted colexicographically:\\n{}\".format(edges))\n\n # Verify tree property, i.e. connectivity.\n V = self.num_vertices\n for i in itertools.product(*map(range, edges.shape[:-2])):\n edges_i = edges[i]\n connected = torch.eye(V, dtype=torch.float)\n connected[edges_i[:, 0], edges_i[:, 1]] = 1\n connected[edges_i[:, 1], edges_i[:, 0]] = 1\n for i in range(int(math.ceil(V ** 0.5))):\n connected = connected.mm(connected).clamp_(max=1)\n if not connected.min() > 0:\n raise ValueError(\"Edges do not constitute a tree:\\n{}\".format(edges_i))\n\n @lazy_property\n def log_partition_function(self):\n # By Kirchoff's matrix-tree theorem, the partition function is the\n # determinant of a truncated version of the graph Laplacian matrix. We\n # use a Cholesky decomposition to compute the log determinant.\n # See https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem\n V = self.num_vertices\n v1, v2 = make_complete_graph(V).unbind(0)\n logits = self.edge_logits.new_full((V, V), -math.inf)\n logits[v1, v2] = self.edge_logits\n logits[v2, v1] = self.edge_logits\n log_diag = logits.logsumexp(-1)\n # Numerically stabilize so that laplacian has 1's on the diagonal.\n shift = 0.5 * log_diag\n laplacian = torch.eye(V) - (logits - shift - shift[:, None]).exp()\n truncated = laplacian[:-1, :-1]\n try:\n import gpytorch\n log_det = gpytorch.lazy.NonLazyTensor(truncated).logdet()\n except ImportError:\n log_det = torch.cholesky(truncated).diag().log().sum() * 2\n return log_det + log_diag[:-1].sum()\n\n def log_prob(self, edges):\n if self._validate_args:\n self.validate_edges(edges)\n v1 = edges[..., 0]\n v2 = edges[..., 1]\n k = v1 + v2 * (v2 - 1) // 2\n return self.edge_logits[k].sum(-1) - self.log_partition_function\n\n def sample(self, sample_shape=torch.Size()):\n \"\"\"\n This sampler is implemented using MCMC run for a small number of steps\n after being initialized by a cheap approximate sampler. This sampler is\n approximate and cubic time. This is faster than the classic\n Aldous-Broder sampler [1,2], especially for graphs with large mixing\n time. Recent research [3,4] proposes samplers that run in\n sub-matrix-multiply time but are more complex to implement.\n\n **References**\n\n [1] `Generating random spanning trees`\n Andrei Broder (1989)\n [2] `The Random Walk Construction of Uniform Spanning Trees and Uniform Labelled Trees`,\n David J. Aldous (1990)\n [3] `Sampling Random Spanning Trees Faster than Matrix Multiplication`,\n David Durfee, Rasmus Kyng, John Peebles, Anup B. 
Rao, Sushant Sachdeva\n (2017) https://arxiv.org/abs/1611.07451\n [4] `An almost-linear time algorithm for uniform random spanning tree generation`,\n Aaron Schild (2017) https://arxiv.org/abs/1711.06455\n \"\"\"\n if sample_shape:\n raise NotImplementedError(\"SpanningTree does not support batching\")\n edges = sample_tree(self.edge_logits, **self.sampler_options)\n assert edges.dim() >= 2 and edges.shape[-2:] == self.event_shape\n return edges\n\n def enumerate_support(self, expand=True):\n \"\"\"\n This is implemented for trees with up to 6 vertices (and 5 edges).\n \"\"\"\n trees = enumerate_spanning_trees(self.num_vertices)\n return torch.tensor(trees, dtype=torch.long)\n\n\n################################################################################\n# Sampler implementation.\n################################################################################\n\n_cpp_module = None\n\n\ndef _get_cpp_module():\n \"\"\"\n JIT compiles the cpp_spanning_tree module.\n \"\"\"\n global _cpp_module\n if _cpp_module is None:\n import os\n from torch.utils.cpp_extension import load\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"spanning_tree.cpp\")\n _cpp_module = load(name=\"cpp_spanning_tree\",\n sources=[path],\n extra_cflags=['-O2'],\n verbose=True)\n return _cpp_module\n\n\ndef make_complete_graph(num_vertices, backend=\"python\"):\n \"\"\"\n Constructs a complete graph.\n\n The pairing function is: ``k = v1 + v2 * (v2 - 1) // 2``\n\n :param int num_vertices: Number of vertices.\n :returns: a 2 x K grid of (vertex, vertex) pairs.\n \"\"\"\n if backend == \"python\":\n return _make_complete_graph(num_vertices)\n elif backend == \"cpp\":\n return _get_cpp_module().make_complete_graph(num_vertices)\n else:\n raise ValueError(\"unknown backend: {}\".format(repr(backend)))\n\n\ndef _make_complete_graph(num_vertices):\n if num_vertices < 2:\n raise ValueError('PyTorch cannot handle zero-sized multidimensional tensors')\n V = num_vertices\n K = V * (V - 1) // 2\n v1 = torch.arange(V)\n v2 = torch.arange(V).unsqueeze(-1)\n v1, v2 = torch.broadcast_tensors(v1, v2)\n v1 = v1.contiguous().view(-1)\n v2 = v2.contiguous().view(-1)\n mask = (v1 < v2)\n grid = torch.stack((v1[mask], v2[mask]))\n assert grid.shape == (2, K)\n return grid\n\n\ndef _remove_edge(grid, edge_ids, neighbors, components, e):\n \"\"\"\n Remove an edge from a spanning tree.\n \"\"\"\n k = edge_ids[e]\n v1 = grid[0, k].item()\n v2 = grid[1, k].item()\n neighbors[v1].remove(v2)\n neighbors[v2].remove(v1)\n components[v1] = 1\n pending = [v1]\n while pending:\n v1 = pending.pop()\n for v2 in neighbors[v1]:\n if not components[v2]:\n components[v2] = 1\n pending.append(v2)\n return k\n\n\ndef _add_edge(grid, edge_ids, neighbors, components, e, k):\n \"\"\"\n Add an edge connecting two components to create a spanning tree.\n \"\"\"\n edge_ids[e] = k\n v1 = grid[0, k].item()\n v2 = grid[1, k].item()\n neighbors[v1].add(v2)\n neighbors[v2].add(v1)\n components.fill_(0)\n\n\ndef _find_valid_edges(components, valid_edge_ids):\n \"\"\"\n Find all edges between two components in a complete undirected graph.\n\n :param components: A [V]-shaped array of boolean component ids. This\n assumes there are exactly two nonemtpy components.\n :param valid_edge_ids: An uninitialized array where output is written. 
On\n return, the subarray valid_edge_ids[:end] will contain edge ids k for all\n valid edges.\n :returns: The number of valid edges found.\n \"\"\"\n k = 0\n end = 0\n for v2, c2 in enumerate(components):\n for v1 in range(v2):\n if c2 ^ components[v1]:\n valid_edge_ids[end] = k\n end += 1\n k += 1\n return end\n\n\[email protected]_grad()\ndef _sample_tree_mcmc(edge_logits, edges):\n if len(edges) <= 1:\n return edges\n\n E = len(edges)\n V = E + 1\n K = V * (V - 1) // 2\n grid = make_complete_graph(V)\n\n # Each of E edges in the tree is stored as an id k in [0, K) indexing into\n # the complete graph. The id of an edge (v1,v2) is k = v1+v2*(v2-1)/2.\n edge_ids = torch.empty(E, dtype=torch.long)\n # This maps each vertex to the set of its neighboring vertices.\n neighbors = {v: set() for v in range(V)}\n # This maps each vertex to its connected component id (0 or 1).\n components = torch.zeros(V, dtype=torch.bool)\n for e in range(E):\n v1, v2 = map(int, edges[e])\n assert v1 < v2\n edge_ids[e] = v1 + v2 * (v2 - 1) // 2\n neighbors[v1].add(v2)\n neighbors[v2].add(v1)\n # This stores ids of edges that are valid candidates for Gibbs moves.\n valid_edges_buffer = torch.empty(K, dtype=torch.long)\n\n # Cycle through all edges in a random order.\n for e in torch.randperm(E):\n e = int(e)\n\n # Perform a single-site Gibbs update by moving this edge elsewhere.\n k = _remove_edge(grid, edge_ids, neighbors, components, e)\n num_valid_edges = _find_valid_edges(components, valid_edges_buffer)\n valid_edge_ids = valid_edges_buffer[:num_valid_edges]\n valid_logits = edge_logits[valid_edge_ids]\n valid_probs = (valid_logits - valid_logits.max()).exp()\n total_prob = valid_probs.sum()\n if total_prob > 0:\n sample = torch.multinomial(valid_probs, 1)[0]\n k = valid_edge_ids[sample]\n _add_edge(grid, edge_ids, neighbors, components, e, k)\n\n # Convert edge ids to a canonical list of pairs.\n edge_ids = edge_ids.sort()[0]\n edges = torch.empty((E, 2), dtype=torch.long)\n edges[:, 0] = grid[0, edge_ids]\n edges[:, 1] = grid[1, edge_ids]\n return edges\n\n\ndef sample_tree_mcmc(edge_logits, edges, backend=\"python\"):\n \"\"\"\n Sample a random spanning tree of a dense weighted graph using MCMC.\n\n This uses Gibbs sampling on edges. Consider E undirected edges that can\n move around a graph of ``V=1+E`` vertices. The edges are constrained so\n that no two edges can span the same pair of vertices and so that the edges\n must form a spanning tree. To Gibbs sample, chose one of the E edges at\n random and move it anywhere else in the graph. After we remove the edge,\n notice that the graph is split into two connected components. The\n constraints imply that the edge must be replaced so as to connect the two\n components. Hence to Gibbs sample, we collect all such bridging\n (vertex,vertex) pairs and sample from them in proportion to\n ``exp(edge_logits)``.\n\n :param torch.Tensor edge_logits: A length-K array of nonnormalized log\n probabilities.\n :param torch.Tensor edges: An E x 2 tensor of initial edges in the form\n of (vertex,vertex) pairs. 
Each edge should be sorted and the entire\n tensor should be lexicographically sorted.\n :returns: An E x 2 tensor of edges in the form of (vertex,vertex) pairs.\n Each edge should be sorted and the entire tensor should be\n lexicographically sorted.\n :rtype: torch.Tensor\n \"\"\"\n if backend == \"python\":\n return _sample_tree_mcmc(edge_logits, edges)\n elif backend == \"cpp\":\n return _get_cpp_module().sample_tree_mcmc(edge_logits, edges)\n else:\n raise ValueError(\"unknown backend: {}\".format(repr(backend)))\n\n\[email protected]_grad()\ndef _sample_tree_approx(edge_logits):\n K = len(edge_logits)\n V = int(round(0.5 + (0.25 + 2 * K)**0.5))\n assert K == V * (V - 1) // 2\n E = V - 1\n grid = make_complete_graph(V)\n\n # Each of E edges in the tree is stored as an id k in [0, K) indexing into\n # the complete graph. The id of an edge (v1,v2) is k = v1+v2*(v2-1)/2.\n edge_ids = torch.empty((E,), dtype=torch.long)\n # This maps each vertex to whether it is a member of the cumulative tree.\n components = torch.zeros(V, dtype=torch.bool)\n\n # Sample the first edge at random.\n probs = (edge_logits - edge_logits.max()).exp()\n k = torch.multinomial(probs, 1)[0]\n components[grid[:, k]] = 1\n edge_ids[0] = k\n\n # Sample edges connecting the cumulative tree to a new leaf.\n for e in range(1, E):\n c1, c2 = components[grid]\n mask = (c1 != c2)\n valid_logits = edge_logits[mask]\n probs = (valid_logits - valid_logits.max()).exp()\n k = mask.nonzero(as_tuple=False)[torch.multinomial(probs, 1)[0]]\n components[grid[:, k]] = 1\n edge_ids[e] = k\n\n # Convert edge ids to a canonical list of pairs.\n edge_ids = edge_ids.sort()[0]\n edges = torch.empty((E, 2), dtype=torch.long)\n edges[:, 0] = grid[0, edge_ids]\n edges[:, 1] = grid[1, edge_ids]\n return edges\n\n\ndef sample_tree_approx(edge_logits, backend=\"python\"):\n \"\"\"\n Approximately sample a random spanning tree of a dense weighted graph.\n\n This is mainly useful for initializing an MCMC sampler.\n\n :param torch.Tensor edge_logits: A length-K array of nonnormalized log\n probabilities.\n :returns: An E x 2 tensor of edges in the form of (vertex,vertex) pairs.\n Each edge should be sorted and the entire tensor should be\n lexicographically sorted.\n :rtype: torch.Tensor\n \"\"\"\n if backend == \"python\":\n return _sample_tree_approx(edge_logits)\n elif backend == \"cpp\":\n return _get_cpp_module().sample_tree_approx(edge_logits)\n else:\n raise ValueError(\"unknown backend: {}\".format(repr(backend)))\n\n\ndef sample_tree(edge_logits, init_edges=None, mcmc_steps=1, backend=\"python\"):\n edges = init_edges\n if edges is None:\n edges = sample_tree_approx(edge_logits, backend=backend)\n for step in range(mcmc_steps):\n edges = sample_tree_mcmc(edge_logits, edges, backend=backend)\n return edges\n\n\n################################################################################\n# Enumeration implementation.\n################################################################################\n\n# See https://oeis.org/A000272\nNUM_SPANNING_TREES = [\n 1, 1, 1, 3, 16, 125, 1296, 16807, 262144, 4782969, 100000000, 2357947691,\n 61917364224, 1792160394037, 56693912375296, 1946195068359375,\n 72057594037927936, 2862423051509815793, 121439531096594251776,\n 5480386857784802185939,\n]\n\n# These topologically distinct sets of trees generate sets of all trees\n# under permutation of vertices. 
See https://oeis.org/A000055\n_TREE_GENERATORS = [\n [[]],\n [[]],\n [[(0, 1)]],\n [[(0, 1), (0, 2)]],\n [\n [(0, 1), (0, 2), (0, 3)],\n [(0, 1), (1, 2), (2, 3)],\n ],\n [\n [(0, 1), (0, 2), (0, 3), (0, 4)],\n [(0, 1), (0, 2), (0, 3), (1, 4)],\n [(0, 1), (1, 2), (2, 3), (3, 4)],\n ],\n [\n [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],\n [(0, 1), (0, 2), (0, 3), (0, 4), (1, 5)],\n [(0, 1), (0, 2), (0, 3), (1, 4), (4, 5)],\n [(0, 1), (0, 2), (0, 3), (2, 4), (3, 5)],\n [(0, 1), (0, 2), (0, 3), (3, 4), (3, 5)],\n [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)],\n ],\n]\n\n\ndef _permute_tree(perm, tree):\n edges = [tuple(sorted([perm[u], perm[v]])) for (u, v) in tree]\n edges.sort(key=lambda uv: (uv[1], uv[0]))\n return tuple(edges)\n\n\ndef _close_under_permutations(V, tree_generators):\n vertices = list(range(V))\n trees = []\n for tree in tree_generators:\n trees.extend(set(_permute_tree(perm, tree)\n for perm in itertools.permutations(vertices)))\n trees.sort()\n return trees\n\n\ndef enumerate_spanning_trees(V):\n \"\"\"\n Compute the set of spanning trees on V vertices.\n \"\"\"\n if V >= len(_TREE_GENERATORS):\n raise NotImplementedError(\n \"enumerate_spanning_trees() is implemented only for trees with up to {} vertices\"\n .format(len(_TREE_GENERATORS) - 1))\n all_trees = _close_under_permutations(V, _TREE_GENERATORS[V])\n assert len(all_trees) == NUM_SPANNING_TREES[V]\n return all_trees\n", "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nfrom pyro.distributions.transforms import Transform\nfrom pyro.distributions import constraints\n\n\nclass OrderedTransform(Transform):\n \"\"\"\n Transforms a real vector into an ordered vector.\n\n Specifically, enforces monotonically increasing order on the last dimension\n of a given tensor via the transformation :math:`y_0 = x_0`,\n :math:`y_i = \\\\sum_{1 \\\\le j \\\\le i} \\\\exp(x_i)`\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.ordered_vector\n bijective = True\n sign = +1\n event_dim = 1\n\n def _call(self, x):\n z = torch.cat([x[..., :1], x[..., 1:].exp()], dim=-1)\n return torch.cumsum(z, dim=-1)\n\n def _inverse(self, y):\n x = (y[..., 1:] - y[..., :-1]).log()\n return torch.cat([y[..., :1], x], dim=-1)\n\n def log_abs_det_jacobian(self, x, y):\n return torch.sum(x[..., 1:], dim=-1)\n" ]
[ [ "torch.Size", "torch.utils.cpp_extension.load", "torch.empty", "torch.zeros", "torch.randperm", "torch.eye", "torch.multinomial", "torch.tensor", "torch.cholesky", "torch.no_grad", "torch.arange", "torch.broadcast_tensors", "torch.stack" ], [ "torch.cumsum", "torch.sum", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Blarc/lol-dodge-predictor
[ "01ac9ce1f117dba7f2375958f96fd1336cc0049d" ]
[ "prepare_data.py" ]
[ "import copy\nimport json\n\nimport cassiopeia as cass\nimport pandas as pd\nfrom IPython.display import clear_output\nfrom roleidentification import get_roles, pull_data\n\nNUMBER_OF_LINES = 108941\nchampion_roles = pull_data()\nchampions_mapper = {champion.id: champion.name for champion in cass.get_champions(\"EUW\")}\n\nsummoners = {}\nmatches = {}\n\nsummoners_columns_mapper = {\n 'total_games': 0,\n 'wins': 1\n}\n\nrole_index_mapper = {\n 'TOP': 0,\n 'JUNGLE': 1,\n 'MIDDLE': 2,\n 'BOTTOM': 3,\n 'UTILITY': 4\n}\n\ncolumns_by_role = ['kills', 'deaths', 'assists', 'gold_earned', 'total_damage_dealt_to_champions',\n 'total_minions_killed', 'vision_score', 'vision_wards_bought', 'total_games', 'wins']\n\nindex = len(summoners_columns_mapper)\nfor role_name in role_index_mapper.keys():\n for column in columns_by_role:\n column_key = role_name + '_' + column\n summoners_columns_mapper[column_key] = index\n index += 1\n\ncolumns_mapper = {}\n\nindex = 0\nmatches_index = 0\n\nwith open('data/raw_data/match_all_merged_sorted.csv', encoding='utf8') as infile:\n for line in infile:\n split = line.rstrip('\\n').split(';')\n if index == 0:\n columns_mapper = {key: value for value, key in enumerate(split)}\n index += 1\n continue\n\n queue_id = float(split[columns_mapper['queueId']])\n if queue_id != 420:\n index += 1\n continue\n\n game_duration = float(split[columns_mapper['gameDuration']])\n\n participant_identities = json.loads(split[columns_mapper['participantIdentities']] \\\n .replace('\\'', '\\\"'))\n\n participants = json.loads(split[columns_mapper['participants']] \\\n .replace('\\'', '\\\"') \\\n .replace('False', '0') \\\n .replace('True', '1'))\n\n champions = []\n for participant in participants:\n champions.append(participant['championId'])\n\n roles = list(get_roles(champion_roles, champions[0:5]).items())\n roles += list(get_roles(champion_roles, champions[5:10]).items())\n\n teams = {\n 100: [None] * 5,\n 200: [None] * 5\n }\n\n win_dict = {}\n\n for participantIdentity, participant, role in zip(participant_identities, participants, roles):\n\n summoner_id = participantIdentity['player']['summonerId']\n team_id = participant['teamId']\n\n role_name = role[0]\n role_index = role_index_mapper[role[0]]\n\n participant_stats = participant['stats']\n win = participant_stats['win']\n kills = participant_stats['kills']\n deaths = participant_stats['deaths']\n assists = participant_stats['assists']\n gold_earned = participant_stats['goldEarned']\n total_damage_dealt_to_champions = participant_stats['totalDamageDealtToChampions']\n total_minions_killed = participant_stats['totalMinionsKilled']\n vision_score = participant_stats['visionScore']\n vision_wards_bought = participant_stats['visionWardsBoughtInGame']\n\n if summoner_id not in summoners:\n summoners[summoner_id] = {key: 0 for key in summoners_columns_mapper}\n\n summoners[summoner_id]['wins'] += win\n summoners[summoner_id]['total_games'] += 1\n summoners[summoner_id][role_name + '_wins'] += win\n summoners[summoner_id][role_name + '_total_games'] += 1\n summoners[summoner_id][role_name + '_kills'] += kills / game_duration * 60\n summoners[summoner_id][role_name + '_deaths'] += deaths / game_duration * 60\n summoners[summoner_id][role_name + '_assists'] += assists / game_duration * 60\n summoners[summoner_id][role_name + '_gold_earned'] += gold_earned / game_duration * 60\n summoners[summoner_id][\n role_name + '_total_damage_dealt_to_champions'] += total_damage_dealt_to_champions / game_duration * 60\n 
summoners[summoner_id][role_name + '_total_minions_killed'] += total_minions_killed / game_duration * 60\n summoners[summoner_id][role_name + '_vision_score'] += vision_score / game_duration * 60\n summoners[summoner_id][role_name + '_vision_wards_bought'] += vision_wards_bought / game_duration * 60\n\n summoner = copy.deepcopy(summoners[summoner_id])\n for role_label in role_index_mapper.keys():\n total_games = summoner[role_label + '_total_games']\n\n if total_games == 0:\n total_games += 1\n\n summoner[role_label + '_wins'] /= total_games\n summoner[role_label + '_kills'] /= total_games\n summoner[role_label + '_deaths'] /= total_games\n summoner[role_label + '_assists'] /= total_games\n summoner[role_label + '_gold_earned'] /= total_games\n summoner[role_label + '_total_damage_dealt_to_champions'] /= total_games\n summoner[role_label + '_total_minions_killed'] /= total_games\n summoner[role_label + '_vision_score'] /= total_games\n summoner[role_label + '_vision_wards_bought'] /= total_games\n\n teams[team_id][role_index] = summoner\n win_dict[team_id] = participant['stats']['win']\n\n for team, win in zip(teams.values(), win_dict.values()):\n match = {}\n for role, player in zip(role_index_mapper.keys(), team):\n for key, value in player.items():\n match[role + '_' + key] = value\n\n match['win'] = win\n matches[matches_index] = match\n matches_index += 1\n\n clear_output(wait=True)\n print(f'{index} / {NUMBER_OF_LINES}')\n index += 1\n\n\n# 156344\nprint(f'Number of matches: {len(matches)}')\n\nprint('Saving to csv...')\npd.DataFrame.from_dict(data=matches, orient='index').to_csv('data/processed_data/matches_sorted.csv', header=True)\nprint('Saved to \\'data/processed_data/matches_sorted.csv\\'')" ]
[ [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
MarceloSabris/PesquisaMestrado
[ "e774ff522fc88d725133fcbe256f763b0d616dd0" ]
[ "GeneratorRepresentEncoderImg.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport h5py\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport os\nimport argparse\n\nfrom util import log\nfrom vqa_util import *\nimport progressbar\nfrom base64 import decode\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n \nimport glob\nimport os\nimport time\n\nimport numpy as np\n\nfrom input_ops import create_input_ops \nimport argparse\nfrom keras import backend as K\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\n\n\nclass Representation:\n\n def __init__(self, x, y, color, shape):\n self.x = x\n self.y = y\n self.color = color\n self.shape = shape\n\n def print_graph(self):\n for i in range(len(self.x)):\n s = 'circle' if self.shape[i] else 'rectangle'\n print('{} {} at ({}, {})'.format(color2str(self.color[i]),\n s, self.x[i], self.y[i]))\n\n\n\n\n\n\n\ndef SSIMLoss(y_true, y_pred):\n return 1 - tf.reduce_mean(tf.image.ssim(y_true, y_pred,1.0))\n\ndef GenerateEnconder(img): \n inputs = tf.keras.Input(img.shape(), name='input_layer')\n autoencoder = tf.keras.Model(inputs, encoded(inputs))\n optimizer = tf.keras.optimizers.Adam(lr = 0.017)\n autoencoder.compile(optimizer=optimizer, loss=SSIMLoss)\n autoencoder.summary()\n return autoencoder\n\ndef loadWeights(modelLoad,neuralNetworkAdress,neuralweightAdress ):\n model1 = tf.keras.models.load_model(neuralNetworkAdress, compile=False) \n model1.compile(loss=SSIMLoss, optimizer='adam', metrics=SSIMLoss)\n model1.summary()\n model1.load_weights(neuralweightAdress)\n for _layer in modelLoad.layers:\n try:\n modelLoad.get_layer(_layer.name).set_weights(model1.get_layer(_layer.name).get_weights())\n print(_layer.name)\n except:\n print(\"erro\")\n print(_layer.name)\n return modelLoad\n\ndef encoded(inputs): \n # Conv Block 1 -> BatchNorm->leaky Relu\n encoded = tf.keras.layers.Conv2D(30, kernel_size=3, strides= 1, padding='same', name='conv_1')(inputs)\n encoded = tf.keras.layers.BatchNormalization(name='batchnorm_1')(encoded)\n encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_1')(encoded)\n\n #tf.nn.relu X tf.keras.layers.LeakyReLU\n\n # Conv Block 2 -> BatchNorm->leaky Relu\n encoded = tf.keras.layers.Conv2D(15, kernel_size=3, strides= 4, padding='same', name='conv_2')(encoded)\n encoded = tf.keras.layers.BatchNormalization(name='batchnorm_2')(encoded)\n encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_2')(encoded)\n # Conv Block 3 -> BatchNorm->leaky Relu\n encoded = tf.keras.layers.Conv2D(8, kernel_size=3, strides=4, padding='same', name='conv_3')(encoded)\n encoded = tf.keras.layers.BatchNormalization(name='batchnorm_3')(encoded)\n encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_3')(encoded)\n\n encoded = tf.keras.layers.Conv2D(4, kernel_size=3, strides=2, padding='same', name='conv_4a')(encoded)\n encoded = tf.keras.layers.BatchNormalization(name='batchnorm_4a')(encoded)\n encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_4a')(encoded)\n\n \n \n \n return encoded \ndef creatModelDecode(imputShape):\n inputs = tf.keras.Input(imputShape, name='input_layer')\n autoencoded = Model(inputs,encoded(inputs))\n autoencoded.compile(loss=SSIMLoss, optimizer='adam', metrics=SSIMLoss)\n autoencoded.summary()\n return loadWeights(autoencoded,\"C:\\Source\\PesquisaMestrado\\decoder1\\model3\",\"C:\\source\\PesquisaMestrado\\decoder1\\weights4\" )\n\ndef GenerateEnconder(img): \n inputs = tf.keras.Input(img.shape(), name='input_layer')\n autoencoder 
= tf.keras.Model(inputs, encoded(inputs))\n optimizer = tf.keras.optimizers.Adam(lr = 0.017)\n autoencoder.compile(optimizer=optimizer, loss=SSIMLoss)\n autoencoder.summary()\n return autoencoder\n\n\n \n\n\n\n\ndef generator(config):\n img_size = config.img_size\n dataset_size = config.dataset_size\n dir_name = config.dir_name\n\n block_size = int(img_size*0.9/N_GRID)\n shape_size = int((img_size*0.9/N_GRID)*0.7/2)\n\n def generate_sample(img_size):\n #generate a cod image to represent the shapes \n # the data of this array is respresent below \n # 0 e 1 x ou y \n # 2 - tipo \n # 3 4 5 6 7 8\n codImag = np.zeros((2,2,9))\n # Generate I: [img_size, img_size, 3]\n img = Image.new('RGB', (img_size, img_size), color=BG_COLOR)\n drawer = ImageDraw.Draw(img)\n idx_coor = np.arange(N_GRID*N_GRID)\n np.random.shuffle(idx_coor)\n idx_color_shape = np.arange(NUM_COLOR)\n np.random.shuffle(idx_color_shape)\n coin = np.random.rand(NUM_SHAPE)\n X = []\n Y = []\n j = 0\n k = 0\n for i in range(NUM_SHAPE):\n x = idx_coor[i] % N_GRID\n y = (N_GRID - np.floor(idx_coor[i] / N_GRID) - 1).astype(np.uint8)\n\n \n # sqaure terms are added to remove ambiguity of distance\n position = ((x+0.5)*block_size-shape_size+x**2, (y+0.5)*block_size-shape_size+y**2,\n (x+0.5)*block_size+shape_size+x**2, (y+0.5)*block_size+shape_size+y**2)\n \n \n \n #codImag[j][k][3] = COLOR[idx_color_shape[i]][0] \n #codImag[j][k][4] = COLOR[idx_color_shape[i]][1] \n #codImag[j][k][5] = COLOR[idx_color_shape[i]][2] \n\n #codImag[j][k][3] = ColorRepres[idx_color_shape[i]][0] \n #codImag[j][k][4] = ColorRepres[idx_color_shape[i]][1] \n #codImag[j][k][5] = ColorRepres[idx_color_shape[i]][2] \n\n\n codImag[j][k][3] = ColorRepres2[idx_color_shape[i]][0] \n codImag[j][k][4] = ColorRepres2[idx_color_shape[i]][1] \n codImag[j][k][5] = ColorRepres2[idx_color_shape[i]][2] \n codImag[j][k][6] = ColorRepres2[idx_color_shape[i]][3] \n codImag[j][k][7] = ColorRepres2[idx_color_shape[i]][4] \n codImag[j][k][8] = ColorRepres2[idx_color_shape[i]][5] \n\n X.append((x+0.5)*block_size+x**2)\n Y.append((y+0.5)*block_size+y**2)\n if coin[i] < 0.5:\n codImag[j][k][2] = 0\n drawer.ellipse(position, fill=COLOR[idx_color_shape[i]])\n else:\n codImag[j][k][2] = 1\n drawer.rectangle(position, fill=COLOR[idx_color_shape[i]])\n \n codImag[j][k][0] = (x+0.5)*block_size+x**2\n codImag[j][k][1] = (y+0.5)*block_size+y**2\n if k==1:\n j+=1\n k=0\n else:\n k+=1\n # Generate its representation\n color = idx_color_shape[:NUM_SHAPE]\n shape = coin < 0.5\n rep = Representation(np.stack(X).astype(np.int),\n np.stack(Y).astype(np.int), color, shape)\n return np.array(img), rep , codImag\n\n def generate_question(rep):\n # Generate questions: [# of shape * # of Q, # of color + # of Q]\n Q = np.zeros((NUM_SHAPE*NUM_Q, NUM_COLOR+NUM_Q), dtype=np.bool)\n for i in range(NUM_SHAPE):\n v = np.zeros(NUM_COLOR)\n v[rep.color[i]] = True\n Q[i*NUM_Q:(i+1)*NUM_Q, :NUM_COLOR] = np.tile(v, (NUM_Q, 1))\n Q[i*NUM_Q:(i+1)*NUM_Q, NUM_COLOR:] = np.diag(np.ones(NUM_Q))\n return Q\n\n def generate_answer(rep):\n # Generate answers: [# of shape * # of Q, # of color + 4]\n # # of color + 4: [color 1, color 2, ... 
, circle, rectangle, yes, no]\n A = np.zeros((NUM_SHAPE*NUM_Q, NUM_COLOR+4), dtype=np.bool)\n for i in range(NUM_SHAPE):\n # Q1: circle or rectangle?\n if rep.shape[i]:\n A[i*NUM_Q, NUM_COLOR] = True\n else:\n A[i*NUM_Q, NUM_COLOR+1] = True\n\n # Q2: bottom?\n if rep.y[i] > int(img_size/2):\n A[i*NUM_Q+1, NUM_COLOR+2] = True\n else:\n A[i*NUM_Q+1, NUM_COLOR+3] = True\n\n # Q3: left?\n if rep.x[i] < int(img_size/2):\n A[i*NUM_Q+2, NUM_COLOR+2] = True\n else:\n A[i*NUM_Q+2, NUM_COLOR+3] = True\n\n distance = 1.1*(rep.y - rep.y[i]) ** 2 + (rep.x - rep.x[i]) ** 2\n idx = distance.argsort()\n # Q4: the color of the nearest object\n min_idx = idx[1]\n A[i*NUM_Q+3, rep.color[min_idx]] = True\n # Q5: the color of the farthest object\n max_idx = idx[-1]\n A[i*NUM_Q+4, rep.color[max_idx]] = True\n return A\n\n # output files\n f = h5py.File(os.path.join(dir_name, 'data.hy'), 'w')\n id_file = open(os.path.join(dir_name, 'id.txt'), 'w')\n idTipo_file = open(os.path.join(dir_name, 'idTipo.txt'), 'w')\n\n\n\n # progress bar\n bar = progressbar.ProgressBar(maxval=100,\n widgets=[progressbar.Bar('=', '[', ']'), ' ',\n progressbar.Percentage()])\n bar.start()\n\n count = 0\n count = 0\n encoded = creatModelDecode((128,128,3))\n somatipoFacil=0\n somatipoDificil =0 \n facil =0\n dificil =0\n while(1):\n I, R,codImag = generate_sample(config.img_size)\n A = generate_answer(R)\n Q = generate_question(R)\n E = encoded.predict(np.reshape(I/255., (1,128, 128, 3)))\n E = E[0]\n \n for j in range(NUM_SHAPE*NUM_Q):\n id = '{}'.format(count)\n id_file.write(id+'\\n')\n cor = 0\n usoucor =0\n questao = 0\n for p in range(len(Q[j, :])) : \n if (Q[j, p] == True) :\n if (usoucor == 0 ) :\n cor = p\n usoucor = 1\n else: \n questao =p \n if questao > 8 :\n tipo =1 \n else: \n tipo =0\n idTipo_file.write(id + \"- Cor: \" +str(cor) + \"/quest:\" + str(questao) + \"/tipo:\" + str(tipo) +'\\n')\n \n if tipo == 1: \n somatipoDificil = somatipoDificil +1\n else :\n somatipoFacil= somatipoFacil+1\n\n grp = f.create_group(id)\n grp['image'] = I\n grp['question'] = Q[j, :]\n grp['answer'] = A[j, :]\n grp['encoded'] = E\n grp['codImag'] = codImag\n grp['codImagOrig'] =codImag\n count += 1\n if count % (dataset_size / 100) == 0:\n bar.update(count / (dataset_size / 100))\n if count >= dataset_size:\n bar.finish()\n f.close()\n id_file.close()\n id_file.close()\n log.info('qtd facil {} qtd dificil {} .'\n .format(somatipoFacil, somatipoDificil))\n log.info('Dataset generated under {} with {} samples.'\n .format(dir_name, dataset_size))\n return\n\n\ndef check_path(path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--dir_name', type=str,default='Sort-of-CLEVR_teste_decode-image2')\n parser.add_argument('--dataset_size', type=int, default=50000)\n \n parser.add_argument('--img_size', type=int, default=128)\n args = parser.parse_args()\n\n basepath = './datasets'\n check_path(basepath)\n path = os.path.join(basepath, args.dir_name)\n check_path(path)\n args.dir_name = path\n\n generator(args)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.Input", "numpy.reshape", "numpy.arange", "tensorflow.keras.layers.Conv2D", "numpy.tile", "numpy.random.shuffle", "numpy.ones", "numpy.stack", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.BatchNormalization", "numpy.random.rand", "numpy.floor", "tensorflow.image.ssim", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
OrangePeelFX/Python-Tutorial
[ "0d47f194553666304765f5bbc928374b7aec8a48" ]
[ "Scripts/005_pyo/scripts/tutorial/s043_noise_with_numpy.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFaire du bruit avec Numpy\n\"\"\"\nfrom pyo import *\nimport numpy as np\n\ns = Server().boot()\n\n# récupère la taille du buffer\nbs = s.getBufferSize()\n\n# Crée une table de la longueur du buffer et la lit en boucle\nt = DataTable(size=bs)\nosc = TableRead(t, freq=t.getRate(), loop=True, mul=0.1).out()\n\n# Partage la mémoire du tableau avec le numpy array\narr = np.asarray(t.getBuffer())\n\ndef white_noise():\n \"Remplit le tableau (donc la table t) avec du bruit blanc\"\n arr[:] = np.random.normal(0.0, 0.5, size=bs)\n\n# Appel la fonction 'white_noise' au début de chaque boucle de traitement.\ns.setCallback(white_noise)\n\n# Démarre le serveur\ns.start()\n\ns.gui(locals())\n" ]
[ [ "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LaudateCorpus1/ambient-gan
[ "60e9ee50b104ea07dd7395e8fc6098112d626f00", "60e9ee50b104ea07dd7395e8fc6098112d626f00" ]
[ "src/commons/measure_utils.py", "src/cifar/amb_measure.py" ]
[ "# pylint: disable = C0103, C0111, C0301, R0913, R0903, R0914, E1101\n\nfrom __future__ import division\n\nimport numpy as np\nfrom scipy import mgrid, ndimage, signal\nimport tensorflow as tf\nimport cvxpy\nimport cv2\n\nimport im_rotate\n\n\ndef get_gaussian_filter(radius, size):\n x, y = mgrid[-(size-1)/2:size/2, -(size-1)/2:size/2]\n g = np.exp(-(x**2/float(2*radius**2) + y**2/float(2*radius**2)))\n g = g / g.sum()\n return g\n\n\ndef blur(hparams, x):\n size = hparams.blur_filter_size # set size=1 for no blurring\n gaussian_filter = get_gaussian_filter(hparams.blur_radius, size)\n gaussian_filter = np.reshape(gaussian_filter, [size, size, 1, 1])\n x_blurred_list = []\n for i in range(hparams.image_dims[-1]):\n x_blurred = tf.nn.conv2d(x[:, :, :, i:i+1], gaussian_filter, strides=[1, 1, 1, 1], padding=\"SAME\")\n x_blurred_list.append(x_blurred)\n x_blurred = tf.concat(x_blurred_list, axis=3)\n return x_blurred\n\n\ndef blur_np(hparams, x):\n size = hparams.blur_filter_size # set size=1 for no blurring\n gaussian_filter = get_gaussian_filter(hparams.blur_radius, size)\n gaussian_filter = np.reshape(gaussian_filter, [1, size, size, 1])\n x_blurred = ndimage.filters.convolve(x, gaussian_filter, mode='constant')\n return x_blurred\n\n\ndef wiener_deconv(hparams, x):\n # https://gist.github.com/danstowell/f2d81a897df9e23cc1da\n\n noise_power = hparams.additive_noise_std**2\n nsr = noise_power / hparams.signal_power # nsr = 1/snr\n\n size = hparams.image_dims[0]\n gaussian_filter = get_gaussian_filter(hparams.blur_radius, size)\n filter_fft = np.fft.fftn(np.fft.fftshift(gaussian_filter))\n filter_fft_conj = np.conj(filter_fft)\n den = filter_fft*filter_fft_conj + nsr + 1e-6\n\n x_deconved = np.zeros_like(x)\n for i in range(x.shape[0]):\n for c in range(x.shape[-1]):\n x_fft = np.fft.fftn(x[i, :, :, c])\n x_deconved_fft = x_fft * filter_fft_conj / den\n x_deconved[i, :, :, c] = np.real(np.fft.ifftn(x_deconved_fft))\n\n x_deconved = np.minimum(np.maximum(x_deconved, hparams.x_min), hparams.x_max)\n\n return x_deconved\n\n\ndef get_inpaint_func_opencv(hparams, inpaint_type):\n x_min = hparams.x_min\n x_max = hparams.x_max\n def inpaint_func(image, mask):\n mask = np.prod(mask, axis=2, keepdims=True)\n unknown = (1-mask).astype(np.uint8)\n image = 255 * (image - x_min) / (x_max - x_min)\n image = image.astype(np.uint8)\n inpainted = cv2.inpaint(image, unknown, 3, inpaint_type)\n inpainted = inpainted.astype(np.float32)\n inpainted = inpainted / 255.0 * (x_max - x_min) + x_min\n inpainted = np.reshape(inpainted, image.shape)\n return inpainted\n return inpaint_func\n\n\ndef get_inpaint_func_tv():\n def inpaint_func(image, mask):\n \"\"\"Total variation inpainting\"\"\"\n inpainted = np.zeros_like(image)\n for c in range(image.shape[2]):\n image_c = image[:, :, c]\n mask_c = mask[:, :, c]\n if np.min(mask_c) > 0:\n # if mask is all ones, no need to inpaint\n inpainted[:, :, c] = image_c\n else:\n h, w = image_c.shape\n inpainted_c_var = cvxpy.Variable(h, w)\n obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))\n constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) == cvxpy.mul_elemwise(mask_c, image_c)]\n prob = cvxpy.Problem(obj, constraints)\n # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2) # scs solver\n prob.solve() # default solver\n inpainted[:, :, c] = inpainted_c_var.value\n return inpainted\n return inpaint_func\n\n\ndef get_blur_func():\n def unmeasure_func(image, mask):\n gaussian_filter = get_gaussian_filter(radius=1, size=5)\n blurred = np.zeros_like(image)\n for c in 
range(image.shape[2]):\n image_c = image[:, :, c]\n mask_c = mask[:, :, c]\n if np.min(mask_c) > 0:\n # if mask is all ones, no need to blur\n blurred[:, :, c] = image_c\n else:\n blurred[:, :, c] = signal.convolve2d(image_c, gaussian_filter, mode='same')\n return blurred\n return unmeasure_func\n\n\ndef get_padding_ep(hparams):\n \"\"\"Get padding for extract_patch measurements\"\"\"\n k = hparams.patch_size\n if hparams.dataset == 'mnist':\n size = 28\n elif hparams.dataset == 'celebA':\n size = 64\n else:\n raise NotImplementedError\n pad_size = (size - k) // 2\n paddings = [[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]]\n return paddings\n\n\ndef get_padding_prp(hparams):\n \"\"\"Get padding for pad_rotate_project measurements\"\"\"\n if hparams.dataset == 'mnist':\n paddings = [[0, 0], [6, 6], [6, 6], [0, 0]]\n elif hparams.dataset == 'celebA':\n paddings = [[0, 0], [14, 14], [14, 14], [0, 0]]\n else:\n raise NotImplementedError\n return paddings\n\n\ndef pad(hparams, inputs):\n paddings = get_padding_prp(hparams)\n outputs = tf.pad(inputs, paddings, \"CONSTANT\")\n return outputs\n\n\ndef rotate(inputs, angles):\n outputs = im_rotate.tf_image_rotate(inputs, angles)\n outputs = tf.reshape(outputs, inputs.get_shape())\n return outputs\n\n\ndef project(hparams, inputs):\n outputs = tf.reduce_sum(inputs, axis=2)\n outputs = tf.reshape(outputs, [hparams.batch_size, -1])\n return outputs\n\n\ndef concat(projected, angles):\n angles = tf.reshape(angles, [-1, 1])\n concatenated = tf.concat([projected, angles], 1)\n return concatenated\n", "# pylint: disable = C0103, C0111, C0301, R0913, R0903, R0914, E1101\n\n\"\"\"Implementations of measurement and unmeasurement\"\"\"\n\nfrom __future__ import division\nimport copy\nimport tensorflow as tf\nimport numpy as np\nfrom scipy import signal\n\nimport amb_measure_utils\n\n\ndef get_mdevice(hparams):\n if hparams.measurement_type == 'drop_independent':\n mdevice = DropIndependent(hparams)\n # elif hparams.measurement_type == 'drop_row':\n # mdevice = DropRow(hparams)\n # elif hparams.measurement_type == 'drop_col':\n # mdevice = DropCol(hparams)\n # elif hparams.measurement_type == 'drop_rowcol':\n # mdevice = DropRowCol(hparams)\n # elif hparams.measurement_type == 'drop_patch':\n # mdevice = DropPatch(hparams)\n # elif hparams.measurement_type == 'keep_patch':\n # mdevice = KeepPatch(hparams)\n # elif hparams.measurement_type == 'extract_patch':\n # mdevice = ExtractPatch(hparams)\n elif hparams.measurement_type == 'blur_addnoise':\n mdevice = BlurAddNoise(hparams)\n # elif hparams.measurement_type == 'pad_rotate_project':\n # mdevice = PadRotateProject(hparams)\n # elif hparams.measurement_type == 'pad_rotate_project_with_theta':\n # mdevice = PadRotateProjectWithTheta(hparams)\n else:\n raise NotImplementedError\n return mdevice\n\n\nclass MeasurementDevice(object):\n \"\"\"Base class for measurement devices\"\"\"\n\n def __init__(self, hparams):\n # self.measurement_type = hparams.measurement_type\n self.batch_dims = [hparams.batch_size] + hparams.image_dims\n self.output_type = None # indicate whether image or vector\n\n def get_theta_ph(self, hparams):\n \"\"\"Abstract Method\"\"\"\n # Should return theta_ph\n raise NotImplementedError\n\n def sample_theta(self, hparams):\n \"\"\"Abstract Method\"\"\"\n # Should return theta_val\n raise NotImplementedError\n\n def measure(self, hparams, x, theta_ph):\n \"\"\"Abstract Method\"\"\"\n # Tensorflow implementation of measurement. 
Must be differentiable wrt x.\n # Should return x_measured\n raise NotImplementedError\n\n def measure_np(self, hparams, x_val, theta_val):\n # Calling tf.Seesion() every time is quite slow\n # x_measured = self.measure(hparams, x_val, theta_val)\n # with tf.Session() as sess:\n # x_measured_val = sess.run(x_measured)\n # return x_measured_val\n raise NotImplementedError\n\n # def unmeasure_np(self, hparams, x_measured_val, theta_val):\n # \"\"\"Abstract Method\"\"\"\n # # Should return x_hat\n # raise NotImplementedError\n\n\nclass DropDevice(MeasurementDevice):\n\n def __init__(self, hparams):\n MeasurementDevice.__init__(self, hparams)\n self.output_type = 'image'\n\n def get_theta_ph(self, hparams, name):\n theta_ph = tf.placeholder(tf.float32, shape=self.batch_dims, name=name)\n return theta_ph\n\n def sample_theta(self, hparams):\n \"\"\"Abstract Method\"\"\"\n # Should return theta_val\n raise NotImplementedError\n\n def measure(self, hparams, x, theta_ph):\n x_measured = tf.multiply(theta_ph, x, name='x_measured')\n return x_measured\n\n def measure_np(self, hparams, x_val, theta_val):\n x_measured_val = theta_val * x_val\n return x_measured_val\n\n def unmeasure_np(self, hparams, x_measured_val, theta_val):\n if hparams.unmeasure_type == 'medfilt':\n unmeasure_func = lambda image, mask: signal.medfilt(image)\n # elif hparams.unmeasure_type == 'inpaint-telea':\n # inpaint_type = cv2.INPAINT_TELEA\n # unmeasure_func = amb_measure_utils.get_inpaint_func_opencv(inpaint_type)\n # elif hparams.unmeasure_type == 'inpaint-ns':\n # inpaint_type = cv2.INPAINT_NS\n # unmeasure_func = amb_measure_utils.get_inpaint_func_opencv(inpaint_type)\n # elif hparams.unmeasure_type == 'inpaint-tv':\n # assert hparams.dataset == 'mnist' # Single channel support only\n # unmeasure_func = amb_measure_utils.get_inpaint_func_tv()\n elif hparams.unmeasure_type == 'blur':\n # TODO(abora): Move radius and size to hparams\n gaussian_filter = amb_measure_utils.get_gaussian_filter(radius=1, size=5)\n def unmeasure_func(image, mask):\n blurred = np.zeros_like(image)\n for c in range(image.shape[2]):\n blurred[:, :, c] = signal.convolve2d(image[:, :, c], gaussian_filter, mode='same')\n return blurred\n else:\n raise NotImplementedError\n\n x_unmeasured_val = np.zeros_like(x_measured_val)\n for i in range(x_measured_val.shape[0]):\n x_unmeasured_val[i] = unmeasure_func(x_measured_val[i], theta_val[i])\n return x_unmeasured_val\n\n\nclass DropMaskType1(DropDevice):\n\n def get_noise_shape(self):\n \"\"\"Abstract Method\"\"\"\n # Should return noise_shape\n raise NotImplementedError\n\n def sample_theta(self, hparams):\n noise_shape = self.get_noise_shape()\n mask = np.random.uniform(size=noise_shape)\n p = hparams.drop_prob\n mask = np.float32(mask >= p) / (1 - p)\n theta_val = np.ones(shape=self.batch_dims)\n theta_val = theta_val * mask\n return theta_val\n\n\nclass DropIndependent(DropMaskType1):\n\n def get_noise_shape(self):\n noise_shape = copy.deepcopy(self.batch_dims)\n noise_shape[3] = 1\n return noise_shape\n\n\n# class DropRow(DropMaskType1):\n\n# def get_noise_shape(self):\n# noise_shape = copy.deepcopy(self.batch_dims)\n# noise_shape[3] = 1\n# noise_shape[2] = 1\n# return noise_shape\n\n\n# class DropCol(DropMaskType1):\n\n# def get_noise_shape(self):\n# noise_shape = copy.deepcopy(self.batch_dims)\n# noise_shape[3] = 1\n# noise_shape[1] = 1\n# return noise_shape\n\n\n# class DropRowCol(DropDevice):\n\n# def sample_theta(self, hparams):\n# drop_row = DropRow(hparams)\n# mask1 = 
drop_row.sample_theta(hparams)\n# drop_col = DropCol(hparams)\n# mask2 = drop_col.sample_theta(hparams)\n# theta_val = mask1 * mask2\n# return theta_val\n\n\n# class DropMaskType2(DropDevice):\n\n# def sample_theta(self, hparams):\n# raise NotImplementedError\n\n# def patch_mask(self, hparams):\n# k = hparams.drop_patch_k\n# h, w = hparams.image_dims[0:2]\n# patch_mask = np.ones(self.batch_dims)\n# for i in range(hparams.batch_size):\n# x, y = np.random.choice(h-k), np.random.choice(w-k)\n# patch_mask[i, x:x+k, y:y+k, :] = 0\n# return patch_mask\n\n\n# class DropPatch(DropMaskType2):\n\n# def sample_theta(self, hparams):\n# return self.patch_mask(hparams)\n\n\n# class KeepPatch(DropMaskType2):\n\n# def sample_theta(self, hparams):\n# return 1 - self.patch_mask(hparams)\n\n\n# class ExtractPatch(MeasurementDevice):\n\n# def __init__(self, hparams):\n# MeasurementDevice.__init__(self, hparams)\n# self.output_type = 'image'\n\n# def get_theta_ph(self, hparams):\n# theta_ph = tf.placeholder(tf.int32, shape=(hparams.batch_size, 2), name='theta_ph')\n# return theta_ph\n\n# def sample_theta(self, hparams):\n# k = hparams.drop_patch_k\n# h, w = hparams.image_dims[0:2]\n# theta_val = np.zeros([hparams.batch_size, 2])\n# for i in range(hparams.batch_size):\n# x, y = np.random.choice(h-k), np.random.choice(w-k)\n# theta_val[i, :] = [x, y]\n# return theta_val\n\n# def measure(self, hparams, x, theta_ph):\n# k = hparams.drop_patch_k\n# patch_list = []\n# for t in range(hparams.batch_size):\n# i, j = theta_ph[t, 0], theta_ph[t, 1]\n# patch = x[t, i:i+k, j:j+k, :]\n# patch = tf.reshape(patch, [1, k, k, hparams.image_dims[-1]])\n# patch_list.append(patch)\n# patches = tf.concat(patch_list, axis=0)\n# #TODO(abora): Remove padding by using a custom discriminator\n# paddings = amb_measure_utils.get_padding_ep(hparams)\n# x_measured = tf.pad(patches, paddings, \"CONSTANT\", name='x_measured')\n# return x_measured\n\n# def unmeasure_np(self, hparams, x_measured_val, theta_val):\n# # How to implement this?\n# raise NotImplementedError\n\n\nclass BlurAddNoise(MeasurementDevice):\n\n def __init__(self, hparams):\n MeasurementDevice.__init__(self, hparams)\n self.output_type = 'image'\n\n def get_theta_ph(self, hparams, name):\n theta_ph = tf.placeholder(tf.float32, shape=self.batch_dims, name=name)\n return theta_ph\n\n def sample_theta(self, hparams):\n theta_val = hparams.additive_noise_std * np.random.randn(*(self.batch_dims))\n return theta_val\n\n def measure(self, hparams, x, theta_ph):\n x_blurred = amb_measure_utils.blur(hparams, x)\n x_measured = tf.add(x_blurred, theta_ph, name='x_measured')\n return x_measured\n\n def measure_np(self, hparams, x_val, theta_val):\n x_blurred = amb_measure_utils.blur_np(hparams, x_val)\n x_measured = x_blurred + theta_val\n return x_measured\n\n def unmeasure_np(self, hparams, x_measured_val, theta_val):\n if hparams.unmeasure_type == 'wiener':\n x_unmeasured_val = amb_measure_utils.wiener_deconv(hparams, x_measured_val)\n else:\n raise NotImplementedError\n return x_unmeasured_val\n\n\n# class PadRotateProjectDevice(MeasurementDevice):\n\n# def __init__(self, hparams):\n# MeasurementDevice.__init__(self, hparams)\n# self.output_type = 'vector'\n\n# def get_theta_ph(self, hparams):\n# theta_ph = tf.placeholder(tf.float32, shape=[hparams.batch_size, hparams.num_rotate_project], name='theta_ph')\n# return theta_ph\n\n# def sample_theta(self, hparams):\n# theta_val = (2*np.pi)*np.random.random((hparams.batch_size, hparams.num_rotate_project)) - np.pi\n# return 
theta_val\n\n# def unmeasure_np(self, hparams, x_measured_val, theta_val):\n# raise NotImplementedError\n\n\n# class PadRotateProject(PadRotateProjectDevice):\n\n# def measure(self, hparams, x, theta_ph):\n# x_padded = amb_measure_utils.pad(hparams, x)\n# x_measured_list = []\n# for i in range(hparams.num_rotate_project):\n# angles = theta_ph[:, i]\n# x_rotated = amb_measure_utils.rotate(x_padded, angles)\n# x_measured = amb_measure_utils.project(hparams, x_rotated)\n# x_measured_list.append(x_measured)\n# x_measured = tf.concat(x_measured_list, axis=1, name='x_measured')\n# return x_measured\n\n# def measure_np(self, hparams, x_val, theta_val):\n# raise NotImplementedError\n\n# def unmeasure_np(self, hparams, x_measured_val, theta_val):\n# raise NotImplementedError\n\n\n# class PadRotateProjectWithTheta(PadRotateProjectDevice):\n\n# def measure(self, hparams, x, theta_ph):\n# x_padded = amb_measure_utils.pad(hparams, x)\n# x_measured_list = []\n# for i in range(hparams.num_rotate_project):\n# angles = theta_ph[:, i]\n# x_rotated = amb_measure_utils.rotate(x_padded, angles)\n# x_projected = amb_measure_utils.project(hparams, x_rotated)\n# x_measured = amb_measure_utils.concat(x_projected, angles)\n# x_measured_list.append(x_measured)\n# x_measured = tf.concat(x_measured_list, axis=1, name='x_measured')\n# return x_measured\n\n# def measure_np(self, hparams, x_val, theta_val):\n# raise NotImplementedError\n\n# def unmeasure_np(self, hparams, x_measured_val, theta_val):\n# raise NotImplementedError\n" ]
[ [ "tensorflow.concat", "numpy.conj", "numpy.maximum", "numpy.min", "numpy.reshape", "tensorflow.reduce_sum", "tensorflow.reshape", "numpy.fft.fftn", "numpy.fft.fftshift", "scipy.signal.convolve2d", "numpy.fft.ifftn", "numpy.zeros_like", "tensorflow.pad", "numpy.prod", "scipy.ndimage.filters.convolve", "tensorflow.nn.conv2d" ], [ "tensorflow.multiply", "scipy.signal.medfilt", "tensorflow.placeholder", "scipy.signal.convolve2d", "numpy.ones", "numpy.zeros_like", "tensorflow.add", "numpy.float32", "numpy.random.randn", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.10", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
AwakerMhy/moment_neural_network
[ "0889f9c0ca045605ff1e88035a7bb4698d9a9d1c" ]
[ "Mnn_Core/mnn_utils.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch.nn.functional as F\nimport numpy as np\nfrom Mnn_Core import fast_dawson\nimport matplotlib.pyplot as plt\nimport palettable\n\n\nclass Param_Container:\n \"\"\"\n args:\n _vol_rest: the rest voltage of a neuron\n _vol_th: the fire threshold of a neuron\n _t_ref: the refractory time of a neuoron after it fired\n _conductance: the conductance of a neuron's membrane\n _ratio: num Excitation neurons : num Inhibition neurons\n degree: from Balanced network, the in-degree of synaptic connection follows Poisson Distribution\n with mean and variance K\n \"\"\"\n\n def __init__(self):\n self.ratio = 0.0\n self.L = 0.05\n self.t_ref = 5.0\n self.vol_th = 20.0\n self.vol_rest = 0.0\n self.eps = 1e-5\n self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827\n self.correction_factor = 2 / np.sqrt(2 * self.L)\n self.cut_off = 10.0\n self.ignore_t_ref = True\n self.degree = 100\n\n def get_degree(self):\n return self.degree\n\n def set_degree(self, degree):\n self.degree = degree\n\n def get_ratio(self):\n return self.ratio\n\n def set_ratio(self, ratio):\n self.ratio = ratio\n\n def get_t_ref(self):\n return self.t_ref\n\n def set_t_ref(self, t_ref):\n self.t_ref = t_ref\n\n def get_vol_th(self):\n return self.vol_th\n\n def set_vol_th(self, vol_th):\n self.vol_th = vol_th\n\n def get_vol_rest(self):\n return self.vol_rest\n\n def set_vol_rest(self, vol_rest):\n self.vol_rest = vol_rest\n\n def get_conductance(self):\n return self.L\n\n def set_conductance(self, conductance):\n self.L = conductance\n self.correction_factor = 2 / np.sqrt(2 * self.L)\n self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827\n\n def get_special_factor(self):\n return self.special_factor\n\n def set_special_factor(self, factor):\n self.special_factor = factor\n\n def set_ignore_t_ref(self, flag: bool = True):\n self.ignore_t_ref = flag\n\n def is_ignore_t_ref(self):\n return self.ignore_t_ref\n\n def get_eps(self):\n return self.eps\n\n def set_eps(self, eps):\n self.eps = eps\n\n def get_cut_off(self):\n return self.cut_off\n\n def set_cut_off(self, cut_off):\n self.cut_off = cut_off\n\n def reset_params(self):\n self.ratio = 0.0\n self.L = 0.05\n self.t_ref = 5.0\n self.vol_th = 20.0\n self.vol_rest = 0.0\n self.eps = 1e-5\n self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827\n self.correction_factor = 2 / np.sqrt(2 * self.L)\n self.cut_off = 10.0\n self.ignore_t_ref = True\n self.degree = 100\n\n def print_params(self):\n print(\"Voltage threshold:\", self.get_vol_th())\n print(\"Voltage rest:\", self.get_vol_rest())\n print(\"Refractory time:\", self.get_t_ref())\n print(\"Membrane conductance:\", self.get_conductance())\n print(\"E-I ratio:\", self.get_ratio())\n print(\"eps: \", self.get_eps())\n print(\"cut_off:\", self.get_cut_off())\n print(\"degree:\", self.get_degree())\n\n\ndef loss_function_mse(pred_mean, pred_std, target_mean, target_std):\n loss1 = F.mse_loss(pred_mean, target_mean)\n loss2 = F.mse_loss(pred_std, target_std)\n return loss1 + loss2\n\n\nclass Mnn_Core_Func(Param_Container):\n def __init__(self):\n super(Mnn_Core_Func, self).__init__()\n self.Dawson1 = fast_dawson.Dawson1()\n self.Dawson2 = fast_dawson.Dawson2()\n\n # compute the up and low bound of integral\n def compute_bound(self, ubar, sbar):\n indx0 = sbar > 0\n with np.errstate(all=\"raise\"):\n ub = (self.vol_th * self.L - ubar) / (np.sqrt(self.L) * sbar + ~indx0)\n lb = (self.vol_rest * self.L - ubar) / (sbar * np.sqrt(self.L) + 
~indx0)\n return ub, lb, indx0\n\n def forward_fast_mean(self, ubar, sbar):\n '''Calculates the mean output firing rate given the mean & std of input firing rate'''\n\n # Divide input domain to several regions\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n mean_out = np.zeros(ubar.shape)\n\n # Region 0 is approx zero for sufficiently large cut_off\n # Region 1 is calculate normally\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n temp_mean = 2 / self.L * (self.Dawson1.int_fast(ub) - self.Dawson1.int_fast(lb))\n\n mean_out[indx2] = 1 / (temp_mean + self.t_ref)\n\n # Region 2 is calculated with analytical limit as sbar --> 0\n indx3 = np.logical_and(~indx0, ubar <= self.vol_th * self.L)\n indx4 = np.logical_and(~indx0, ubar > self.vol_th * self.L)\n mean_out[indx3] = 0.0\n mean_out[indx4] = 1 / (self.t_ref - 1 / self.L * np.log(1 - 1 / ubar[indx4]))\n\n return mean_out\n\n def backward_fast_mean(self, ubar, sbar, u_a):\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n grad_uu = np.zeros(ubar.shape) # Fano factor\n\n # Region 0 is approx zero for sufficiently large cut_off\n # Region 1 is calculate normally\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n delta_g = self.Dawson1.dawson1(ub) - self.Dawson1.dawson1(lb)\n grad_uu[indx2] = u_a[indx2] * u_a[indx2] / sbar[indx2] * delta_g * 2 / self.L / np.sqrt(self.L)\n\n # Region 2 is calculated with analytical limit as sbar --> 0\n indx6 = np.logical_and(~indx0, ubar <= 1)\n indx4 = np.logical_and(~indx0, ubar > 1)\n\n grad_uu[indx6] = 0.0\n grad_uu[indx4] = self.vol_th * u_a[indx4] * u_a[indx4] / ubar[indx4] / (ubar[indx4] - self.vol_th * self.L)\n\n # ---------------\n\n grad_us = np.zeros(ubar.shape)\n temp = self.Dawson1.dawson1(ub) * ub - self.Dawson1.dawson1(lb) * lb\n grad_us[indx2] = u_a[indx2] * u_a[indx2] / sbar[indx2] * temp * 2 / self.L\n\n return grad_uu, grad_us\n\n def forward_fast_std(self, ubar, sbar, u_a):\n '''Calculates the std of output firing rate given the mean & std of input firing rate'''\n\n # Divide input domain to several regions\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n fano_factor = np.zeros(ubar.shape) # Fano factor\n\n # Region 0 is approx zero for sufficiently large cut_off\n # Region 1 is calculate normally\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n # cached mean used\n varT = 8 / self.L / self.L * (self.Dawson2.int_fast(ub) - self.Dawson2.int_fast(lb))\n fano_factor[indx2] = varT * u_a[indx2] * u_a[indx2]\n\n # Region 2 is calculated with analytical limit as sbar --> 0\n fano_factor[~indx0] = (ubar[~indx0] < 1) + 0.0\n with np.errstate(invalid=\"raise\"):\n try:\n std_out = np.sqrt(fano_factor * u_a)\n except FloatingPointError:\n print(\"========min batch norm input ubar & sbar ========\")\n print(ubar.shape, sbar.shape)\n print(np.min(ubar), np.min(sbar), sep=\"\\n\")\n print(ubar, sbar, sep=\"\\n\")\n print(\"==========activate u and fano factor===============\")\n print(u_a.shape, fano_factor.shape)\n print(np.min(u_a), 
np.min(fano_factor))\n print(u_a, fano_factor, sep=\"\\n\")\n raise FloatingPointError\n\n return std_out\n\n def backward_fast_std(self, ubar, sbar, u_a, s_a):\n '''Calculates the gradient of the std of the firing rate with respect to the mean & std of input firing rate'''\n\n # Divide input domain to several regions\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n grad_su = np.zeros(ubar.shape)\n\n delta_g = self.Dawson1.dawson1(ub) - self.Dawson1.dawson1(lb)\n delta_h = self.Dawson2.dawson2(ub) - self.Dawson2.dawson2(lb)\n delta_H = self.Dawson2.int_fast(ub) - self.Dawson2.int_fast(lb)\n\n temp1 = 3 / self.L / np.sqrt(self.L) * s_a[indx2] / sbar[indx2] * u_a[indx2] * delta_g\n temp2 = - 1 / 2 / np.sqrt(self.L) * s_a[indx2] / sbar[indx2] * delta_h / delta_H\n\n grad_su[indx2] = temp1 + temp2\n\n # -----------\n\n grad_ss = np.zeros(ubar.shape)\n\n temp_dg = self.Dawson1.dawson1(ub) * ub - self.Dawson1.dawson1(lb) * lb\n temp_dh = self.Dawson2.dawson2(ub) * ub - self.Dawson2.dawson2(lb) * lb\n\n grad_ss[indx2] = 3 / self.L * s_a[indx2] / sbar[indx2] * u_a[indx2] * temp_dg \\\n - 1 / 2 * s_a[indx2] / sbar[indx2] * temp_dh / delta_H\n\n indx4 = np.logical_and(~indx0, ubar > 1)\n\n grad_ss[indx4] = 1 / np.sqrt(2 * self.L) * np.power(u_a[indx4], 1.5) * np.sqrt(\n 1 / (1 - ubar[indx4]) / (1 - ubar[indx4]) - 1 / ubar[indx4] / ubar[indx4])\n\n return grad_su, grad_ss\n\n def forward_fast_chi(self, ubar, sbar, u_a, s_a):\n \"\"\"\n Calculates the linear response coefficient of output firing rate given the mean & std of input firing rate\n \"\"\"\n\n # Divide input domain to several regions\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n chi = np.zeros(ubar.shape)\n\n # Region 0 is approx zero for sufficiently large cut_off\n # Region 1 is calculate normally\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n delta_g = self.Dawson1.dawson1(ub) - self.Dawson1.dawson1(lb)\n chi[indx2] = u_a[indx2] * u_a[indx2] / s_a[indx2] * delta_g * 2 / self.L / np.sqrt(self.L)\n\n # delta_H = self.ds2.int_fast(ub) - self.ds2.int_fast(lb)\n # X[indx2] = np.sqrt(self.u[indx2])*delta_g/np.sqrt(delta_H)/np.sqrt(2*self.L) # alternative method\n\n # Region 2 is calculated with analytical limit as sbar --> 0\n indx3 = np.logical_and(~indx0, ubar <= self.vol_th * self.L)\n indx4 = np.logical_and(~indx0, ubar > self.vol_th * self.L)\n\n chi[indx3] = 0.0\n chi[indx4] = np.sqrt(2 / self.L) / np.sqrt(self.t_ref - 1 / self.L * np.log(1 - 1 / ubar[indx4])) / np.sqrt(\n 2 * ubar[indx4] - 1)\n\n return chi\n\n def backward_fast_chi(self, ubar, sbar, u_a, chi):\n \"\"\"\n Calculates the gradient of the linear response coefficient with respect to the mean & std of input firing rate\n \"\"\"\n grad_uu, grad_us = self.backward_fast_mean(ubar, sbar, u_a)\n\n # Divide input domain to several regions\n indx0 = sbar > 0\n indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)\n indx2 = indx0 & indx1\n\n ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))\n\n grad_chu = 
np.zeros(ubar.shape)\n\n tmp1 = self.Dawson1.dawson1(ub) * ub - self.Dawson1.dawson1(lb) * lb\n delta_g = self.Dawson1.dawson1(ub) - self.Dawson1.dawson1(lb)\n delta_H = self.Dawson2.int_fast(ub) - self.Dawson2.int_fast(lb)\n delta_h = self.Dawson2.dawson2(ub) - self.Dawson2.dawson2(lb)\n\n grad_chu[indx2] = 0.5 * chi[indx2] / u_a[indx2] * grad_uu[indx2] \\\n - np.sqrt(2) / self.L * np.sqrt(u_a[indx2] / delta_H) * tmp1 / sbar[indx2] \\\n + chi[indx2] * delta_h / delta_H / 2 / np.sqrt(self.L) / sbar[indx2]\n\n indx4 = np.logical_and(~indx0, ubar > 1)\n\n tmp_grad_uu = self.vol_th * u_a[indx4] * u_a[indx4] / ubar[indx4] / (ubar[indx4] - self.vol_th * self.L)\n\n grad_chu[indx4] = 1 / np.sqrt(2 * self.L) / np.sqrt(u_a[indx4] * (2 * ubar[indx4] - 1)) * tmp_grad_uu \\\n - np.sqrt(2 / self.L) / (self.vol_th * self.L) * np.sqrt(u_a[indx4]) * np.power(\n 2 * ubar[indx4] - 1, -1.5)\n\n # -----------\n\n grad_chs = np.zeros(ubar.shape)\n\n temp_dg = 2 * self.Dawson1.dawson1(ub) * ub * ub - 2 * self.Dawson1.dawson1(lb) * lb * lb \\\n + self.vol_th * self.L / np.sqrt(self.L) / sbar[indx2]\n temp_dh = self.Dawson2.dawson2(ub) * ub - self.Dawson2.dawson2(lb) * lb\n # temp_dH = self.ds2.int_fast(ub)*ub - self.ds2.int_fast(lb)*lb\n\n grad_chs[indx2] = 0.5 * chi[indx2] / u_a[indx2] * grad_us[indx2] + \\\n - chi[indx2] / sbar[indx2] * (temp_dg / delta_g) \\\n + 0.5 * chi[indx2] / sbar[indx2] / delta_H * temp_dh\n\n return grad_chu, grad_chs\n\n\nclass Debug_Utils:\n @staticmethod\n def mnn_map_visualization(ubar=np.arange(-10, 10, 0.1), sbar=np.arange(0.0, 30, 0.1)):\n cmap = palettable.cmocean.sequential.Ice_4.mpl_colormap\n mnn_func = Mnn_Core_Func()\n uv, sv = np.meshgrid(ubar, sbar)\n shape = uv.shape\n uv = uv.flatten()\n sv = sv.flatten()\n\n u = mnn_func.forward_fast_mean(uv, sv)\n s = mnn_func.forward_fast_std(uv, sv, u)\n chi = mnn_func.forward_fast_chi(uv, sv, u, s)\n grad_uu, grad_us = mnn_func.backward_fast_mean(uv, sv, u)\n grad_su, grad_ss = mnn_func.backward_fast_std(uv, sv, u, s)\n grad_chu, grad_chs = mnn_func.backward_fast_chi(uv, sv, u, chi)\n\n u = u.reshape(shape)\n s = s.reshape(shape)\n chi = chi.reshape(shape)\n uv = uv.reshape(shape)\n sv = sv.reshape(shape)\n grad_uu = grad_uu.reshape(shape)\n grad_us = grad_us.reshape(shape)\n grad_su = grad_su.reshape(shape)\n grad_ss = grad_ss.reshape(shape)\n grad_chu = grad_chu.reshape(shape)\n grad_chs = grad_chs.reshape(shape)\n\n fig = plt.figure(figsize=(21, 21))\n fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, hspace=0.1, wspace=0.1)\n ax1 = fig.add_subplot(3, 3, 1, projection=\"3d\")\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\mu$\", fontsize=16)\n ax1.grid(False)\n ax1.invert_xaxis()\n ax1.plot_surface(uv, sv, u, cmap=cmap)\n\n ax1 = fig.add_subplot(3, 3, 2, projection=\"3d\")\n ax1.plot_surface(uv, sv, s, cmap=cmap)\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\sigma$\", fontsize=16)\n ax1.invert_xaxis()\n ax1.grid(False)\n\n ax1 = fig.add_subplot(3, 3, 3, projection=\"3d\")\n ax1.plot_surface(uv, sv, chi, cmap=cmap)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\chi$\", fontsize=16)\n ax1.grid(False)\n\n ax1 = fig.add_subplot(3, 3, 4, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_uu, alpha=0.7, cmap=cmap, rstride=10, 
cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\mu}{\\partial \\overline{\\mu}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.3, 0.3)\n\n ax1 = fig.add_subplot(3, 3, 5, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_su, alpha=0.7, cmap=cmap, rstride=10, cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\sigma}{\\partial \\overline{\\mu}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.3, 0.3)\n\n ax1 = fig.add_subplot(3, 3, 6, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_chu, alpha=0.7, cmap=cmap, rstride=10, cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\chi}{\\partial \\overline{\\mu}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.3, 0.3)\n\n ax1 = fig.add_subplot(3, 3, 7, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_us, alpha=0.7, cmap=cmap, rstride=10, cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\mu}{\\partial \\overline{\\sigma}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.1, 0.1)\n\n ax1 = fig.add_subplot(3, 3, 8, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_ss, alpha=0.7, cmap=cmap, rstride=10, cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\sigma}{\\partial \\overline{\\sigma}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.1, 0.1)\n\n ax1 = fig.add_subplot(3, 3, 9, projection=\"3d\")\n ax1.plot_wireframe(uv, sv, grad_chs, alpha=0.7, cmap=cmap, rstride=10, cstride=80)\n ax1.invert_xaxis()\n ax1.set_xlabel(r\"$\\overline{\\mu}$\", fontsize=16)\n ax1.set_ylabel(r\"$\\overline{\\sigma}$\", fontsize=16)\n ax1.set_zlabel(r\"$\\frac{\\partial \\chi}{\\partial \\overline{\\sigma}}$\", fontsize=16)\n ax1.grid(False)\n ax1.set_zlim(-0.3, 0.3)\n\n fig.savefig(\"activate_map.png\", dpi=300)\n plt.show()\n\n @staticmethod\n def batch_3d_plot(fig_size, layout, x, y, z, xlable=None, ylable=None, zlable=None, save=None, subtitle=None,\n suptitle=None,\n invert_x=False, invert_y=False, invert_z=False, cmap=\"rainbow\"):\n fig = plt.figure(figsize=fig_size)\n if suptitle is not None:\n plt.suptitle(suptitle)\n if subtitle is not None:\n for i in range(layout[0] * layout[1]):\n print(i)\n ax = fig.add_subplot(layout[0], layout[1], i + 1, projection=\"3d\")\n ax.plot_surface(x, y, z[i], cmap=cmap)\n if invert_x:\n ax.invert_xaxis()\n if invert_y:\n ax.invert_yaxis()\n if invert_z:\n ax.invert_zaxis()\n if xlable is not None:\n ax.set_xlabel(eval(xlable))\n if ylable is not None:\n ax.set_ylabel(eval(ylable))\n if zlable is not None:\n ax.set_zlabel(eval(zlable[i]))\n ax.set_title(eval(subtitle[i]))\n if save is not None:\n fig.savefig(save)\n plt.show()\n\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.meshgrid", "numpy.power", "numpy.min", "numpy.arange", "torch.nn.functional.mse_loss", "numpy.errstate", "matplotlib.pyplot.suptitle", "numpy.logical_and", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiesiZhao077/mmf
[ "e20f0d29638c5d05e3e0c385fe67a9bfeef0f921" ]
[ "mmf/utils/build.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport logging\nimport os\nimport warnings\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport mmf\nimport pytorch_lightning as pl\nimport torch\nfrom mmf.common.meter import Meter\nfrom mmf.common.registry import registry\nfrom mmf.datasets.iteration_strategies import (\n ConstantIterationStrategy,\n IterationStrategy,\n SizeProportionalIterationStrategy,\n)\nfrom mmf.datasets.processors.processors import Processor\nfrom mmf.utils.configuration import Configuration, get_global_config\nfrom mmf.utils.distributed import is_dist_initialized, is_master, is_xla, synchronize\nfrom mmf.utils.general import get_optimizer_parameters\nfrom omegaconf import DictConfig, OmegaConf\n\n\ntry:\n import torch_xla.core.xla_model as xm # noqa\n import torch_xla.distributed.parallel_loader as xla_pl # noqa\nexcept ImportError:\n xm = None\n\nProcessorDict = Dict[str, Processor]\nlogger = logging.getLogger(__name__)\n\n\ndef build_config(configuration: Configuration, *args, **kwargs) -> DictConfig:\n \"\"\"Builder function for config. Freezes the configuration and registers\n configuration object and config DictConfig object to registry.\n\n Args:\n configuration (Configuration): Configuration object that will be\n used to create the config.\n\n Returns:\n (DictConfig): A config which is of type omegaconf.DictConfig\n \"\"\"\n configuration.freeze()\n config = configuration.get_config()\n registry.register(\"config\", config)\n registry.register(\"configuration\", configuration)\n\n return config\n\n\ndef build_trainer(config: DictConfig) -> Any:\n \"\"\"Builder function for creating a trainer class. Trainer class name\n is picked from the config.\n\n Args:\n config (DictConfig): Configuration that will be used to create\n the trainer.\n\n Returns:\n (BaseTrainer): A trainer instance\n \"\"\"\n trainer_type = config.training.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n trainer_obj = trainer_cls(config)\n\n return trainer_obj\n\n\ndef build_model(\n config: Union[DictConfig, \"mmf.models.base_model.BaseModel.Config\"]\n) -> \"mmf.models.base_model.BaseModel\":\n from mmf.models.base_model import BaseModel\n\n # If it is not an OmegaConf object, create the object\n if not isinstance(config, DictConfig) and isinstance(config, BaseModel.Config):\n config = OmegaConf.structured(config)\n\n model_name = config.model\n model_class = registry.get_model_class(model_name)\n\n if model_class is None:\n raise RuntimeError(f\"No model registered for name: {model_name}\")\n model = model_class(config)\n\n if hasattr(model, \"build\"):\n \"\"\"Model build involves checkpoint loading\n If the checkpoint is not available the underlying\n methods try to download it.\n Let master build the model (download the checkpoints) while\n other ranks wait for the sync message\n Once the master has downloaded the checkpoint and built the\n model it sends the sync message, completing the synchronization\n now other cores can proceed to build the model\n using already downloaded checkpoint.\n \"\"\"\n if is_master():\n model.load_requirements()\n model.build()\n synchronize()\n else:\n synchronize()\n model.build()\n model.init_losses()\n\n return model\n\n\ndef build_dataset(\n dataset_key: str, config=None, dataset_type=\"train\"\n) -> torch.utils.data.Dataset:\n \"\"\"Builder function for creating a dataset. 
If dataset_key is passed\n the dataset is created from default config of the dataset and thus is\n disable config even if it is passed. Otherwise, we use MultiDatasetLoader to\n build and return an instance of dataset based on the config\n\n Args:\n dataset_key (str): Key of dataset to build.\n config (DictConfig, optional): Configuration that will be used to create\n the dataset. If not passed, dataset's default config will be used.\n Defaults to {}.\n dataset_type (str, optional): Type of the dataset to build, train|val|test.\n Defaults to \"train\".\n\n Returns:\n (torch.utils.data.Dataset): A dataset instance of type torch Dataset\n \"\"\"\n from mmf.datasets.base_dataset_builder import BaseDatasetBuilder\n from mmf.utils.configuration import load_yaml_with_defaults\n\n datamodule_instance = build_datamodule(dataset_key)\n # If config is not provided, we take it from default one\n if not config:\n config_path = datamodule_instance.config_path()\n if config_path is None:\n # If config path wasn't defined, send an empty config path\n # but don't force dataset to define a config\n warnings.warn(\n f\"Config path not defined for {dataset_key}, \"\n + \"continuing with empty config\"\n )\n config = OmegaConf.create()\n else:\n config = load_yaml_with_defaults(config_path)\n config = OmegaConf.select(config, f\"dataset_config.{dataset_key}\")\n if config is None:\n config = OmegaConf.create()\n OmegaConf.set_struct(config, True)\n elif dataset_key in config:\n # Handle Global config\n config = config[dataset_key]\n\n datamodule_instance.build_dataset(config)\n dataset = datamodule_instance.load_dataset(config, dataset_type)\n if hasattr(datamodule_instance, \"update_registry_for_model\"):\n datamodule_instance.update_registry_for_model(config)\n\n return dataset\n\n\n# TODO: move dataset_type enum to typings\ndef build_datasets(\n dataset_list: List[str], dataset_config: DictConfig, dataset_type=\"train\"\n) -> List[torch.utils.data.Dataset]:\n datasets = []\n for dataset in dataset_list:\n if dataset in dataset_config:\n dataset_config = dataset_config[dataset]\n else:\n warnings.warn(\n f\"Dataset {dataset} is missing from dataset_config\"\n + \" in config. Proceeding with empty config.\"\n )\n dataset_config = OmegaConf.create()\n\n dataset_instance = build_dataset(dataset, dataset_config, dataset_type)\n if dataset_instance is None:\n continue\n datasets.append(dataset_instance)\n\n return datasets\n\n\ndef build_datamodule(dataset_key) -> pl.LightningDataModule:\n dataset_builder = registry.get_builder_class(dataset_key)\n assert dataset_builder, (\n f\"Key {dataset_key} doesn't have a registered \" + \"dataset builder\"\n )\n builder_instance: pl.LightningDataModule = dataset_builder()\n return builder_instance\n\n\ndef build_multiple_datamodules(\n dataset_list: List[str], all_dataset_config: DictConfig\n) -> Dict[str, pl.LightningDataModule]:\n datamodules: Dict[str, pl.LightningDataModule] = {}\n for dataset in dataset_list:\n datamodule_instance = build_datamodule(dataset)\n if dataset in all_dataset_config:\n dataset_config = all_dataset_config[dataset]\n else:\n warnings.warn(\n f\"Dataset {dataset} is missing from dataset_config\"\n + \" in config. 
Proceeding with empty config.\"\n )\n dataset_config = OmegaConf.create()\n\n if is_master():\n datamodule_instance.prepare_data(dataset_config)\n\n synchronize()\n datamodule_instance.setup(config=dataset_config)\n if hasattr(datamodule_instance, \"update_registry_for_model\"):\n datamodule_instance.update_registry_for_model(dataset_config)\n datamodules[dataset] = datamodule_instance\n return datamodules\n\n\ndef build_dataloader_and_sampler(\n dataset_instance: torch.utils.data.Dataset, datamodule_config: DictConfig\n) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:\n \"\"\"Builds and returns a dataloader along with its sample\n\n Args:\n dataset_instance (torch.utils.data.Dataset): Instance of dataset for which\n dataloader has to be created\n datamodule_config (omegaconf.DictConfig): Datamodule configuration; required\n for infering params for dataloader\n\n Returns:\n Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:\n Tuple of Dataloader and Sampler instance\n \"\"\"\n from mmf.common.batch_collator import BatchCollator\n\n training_config = get_global_config(\"training\")\n # Support params coming in from dataloader params\n other_args = {\n \"num_workers\": datamodule_config.get(\n \"num_workers\", training_config.get(\"num_workers\", 4)\n ),\n \"pin_memory\": datamodule_config.get(\n \"pin_memory\", training_config.get(\"pin_memory\", False)\n ),\n \"shuffle\": datamodule_config.get(\"shuffle\", None),\n \"batch_size\": datamodule_config.get(\"batch_size\", None),\n }\n\n # IterableDataset returns batches directly, so no need to add Sampler\n # or batch size as user is expected to control those. This is a fine\n # assumption for now to not support single item based IterableDataset\n # as it will add unnecessary complexity and config parameters\n # to the codebase\n if not isinstance(dataset_instance, torch.utils.data.IterableDataset):\n other_args = _add_extra_args_for_dataloader(dataset_instance, other_args)\n else:\n other_args.pop(\"shuffle\")\n\n loader = torch.utils.data.DataLoader(\n dataset=dataset_instance,\n collate_fn=BatchCollator(\n dataset_instance.dataset_name, dataset_instance.dataset_type\n ),\n drop_last=is_xla(), # see also MultiDatasetLoader.__len__\n **other_args,\n )\n\n if is_xla():\n device = xm.xla_device()\n loader = xla_pl.MpDeviceLoader(loader, device)\n\n if other_args[\"num_workers\"] >= 0:\n # Suppress leaking semaphore warning\n os.environ[\"PYTHONWARNINGS\"] = \"ignore:semaphore_tracker:UserWarning\"\n\n loader.dataset_type = dataset_instance.dataset_type\n\n return loader, other_args.get(\"sampler\", None)\n\n\ndef build_test_reporter(\n datamodules: List[pl.LightningDataModule],\n config: DictConfig = None,\n dataset_type: str = \"train\",\n):\n test_reporter_key = \"default\"\n if config:\n test_reporter_key = config.get(\"type\", \"default\")\n test_reporter_class = registry.get_test_rerporter_class(test_reporter_key)\n assert (\n test_reporter_class\n ), f\"Key {test_reporter_key} doesn't have a registered test_reporter class\"\n\n if not config:\n warnings.warn(\n f\"Config not provided for {test_reporter_key}, test_reporter\"\n + \"continuing with empty config\"\n )\n params_config = OmegaConf.create()\n else:\n params_config = config.params\n\n return test_reporter_class(datamodules, params_config, dataset_type)\n\n\ndef _add_extra_args_for_dataloader(\n dataset_instance: torch.utils.data.Dataset, other_args: Dict[str, Any] = None\n) -> Dict[str, Any]:\n from mmf.utils.general import 
get_batch_size\n\n dataset_type = dataset_instance.dataset_type\n\n if other_args[\"shuffle\"] is None:\n other_args[\"shuffle\"] = False\n if dataset_type != \"test\":\n other_args[\"shuffle\"] = True\n\n # In distributed mode, we use DistributedSampler from PyTorch\n if is_dist_initialized():\n other_args[\"sampler\"] = torch.utils.data.DistributedSampler(\n dataset_instance, shuffle=other_args[\"shuffle\"]\n )\n # Shuffle is mutually exclusive with sampler, let DistributedSampler\n # take care of shuffle and pop from main args\n other_args.pop(\"shuffle\")\n\n if is_xla():\n other_args[\"sampler\"] = torch.utils.data.DistributedSampler(\n dataset_instance,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=other_args[\"shuffle\"],\n )\n other_args.pop(\"shuffle\")\n\n if other_args[\"batch_size\"] is None:\n other_args[\"batch_size\"] = get_batch_size()\n\n return other_args\n\n\ndef build_optimizer(model, config):\n optimizer_config = config.optimizer\n if \"type\" not in optimizer_config:\n raise ValueError(\n \"Optimizer attributes must have a 'type' key \"\n \"specifying the type of optimizer. \"\n \"(Custom or PyTorch, e.g. 'adam_w' or 'SGD')\"\n )\n optimizer_type = optimizer_config.type\n\n if \"params\" not in optimizer_config:\n warnings.warn(\"optimizer attributes has no params defined, defaulting to {}.\")\n\n params = optimizer_config.get(\"params\", {})\n\n if hasattr(torch.optim, optimizer_type):\n optimizer_class = getattr(torch.optim, optimizer_type)\n else:\n optimizer_class = registry.get_optimizer_class(optimizer_type)\n if optimizer_class is None:\n raise ValueError(\n \"No optimizer class of type {} present in \"\n \"either torch or registered to registry\"\n )\n\n parameters = get_optimizer_parameters(model, config)\n\n if optimizer_config.get(\"enable_state_sharding\", False):\n # TODO(vedanuj): Remove once OSS is moved to PT upstream\n try:\n from fairscale.optim.oss import OSS\n except ImportError:\n print(\n \"Optimizer state sharding requires fairscale. 
\"\n + \"Install using pip install fairscale.\"\n )\n raise\n\n assert (\n is_dist_initialized()\n ), \"Optimizer state sharding can only be used in distributed mode.\"\n\n is_fp16 = config.get(\"training\", {}).get(\"fp16\", False)\n optimizer = OSS(\n params=parameters, optim=optimizer_class, broadcast_fp16=is_fp16, **params\n )\n else:\n optimizer = optimizer_class(parameters, **params)\n return optimizer\n\n\ndef build_lightning_optimizers(model, config):\n optimizer = build_optimizer(model, config)\n\n if config.training.lr_scheduler:\n lr_scheduler = build_scheduler(optimizer, config)\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": lr_scheduler, \"interval\": \"step\"},\n }\n else:\n return optimizer\n\n\ndef build_scheduler(optimizer, config):\n scheduler_config = config.get(\"scheduler\", {})\n\n if \"type\" not in scheduler_config:\n warnings.warn(\n \"No type for scheduler specified even though lr_scheduler is True, \"\n \"setting default to 'Pythia'\"\n )\n scheduler_type = scheduler_config.get(\"type\", \"pythia\")\n\n if \"params\" not in scheduler_config:\n warnings.warn(\"scheduler attributes has no params defined, defaulting to {}.\")\n params = scheduler_config.get(\"params\", {})\n scheduler_class = registry.get_scheduler_class(scheduler_type)\n scheduler = scheduler_class(optimizer, **params)\n\n return scheduler\n\n\ndef build_classifier_layer(config, *args, **kwargs):\n from mmf.modules.layers import ClassifierLayer\n\n classifier = ClassifierLayer(config.type, *args, **config.params, **kwargs)\n return classifier.module\n\n\ndef build_text_encoder(config, *args, **kwargs):\n \"\"\"Deprecated, please do not use\"\"\"\n try:\n from mmf.modules.fb.encoders import TextEncoderFactory\n except ImportError:\n from mmf.modules.encoders import TextEncoderFactory\n\n text_encoder = TextEncoderFactory(config, *args, **kwargs)\n return text_encoder.module\n\n\ndef build_image_encoder(config, direct_features=False, **kwargs):\n \"\"\"Deprecated, please do not use\"\"\"\n from mmf.modules.encoders import ImageEncoderFactory, ImageFeatureEncoderFactory\n\n if direct_features:\n module = ImageFeatureEncoderFactory(config)\n else:\n module = ImageEncoderFactory(config)\n return module.module\n\n\ndef build_encoder(config: Union[DictConfig, \"mmf.modules.encoders.Encoder.Config\"]):\n from mmf.modules.encoders import Encoder\n\n # If it is not an OmegaConf object, create the object\n if not isinstance(config, DictConfig) and isinstance(config, Encoder.Config):\n config = OmegaConf.structured(config)\n\n if \"type\" in config:\n # Support config initialization in form of\n # encoder:\n # type: identity # noqa\n # params:\n # in_dim: 256\n name = config.type\n if isinstance(name, Enum):\n name = name.value\n params = config.get(\"params\", None)\n else:\n # Structured Config support\n name = config.name\n params = config\n\n encoder_cls = registry.get_encoder_class(name)\n\n # If params were not passed, try generating them from encoder\n # class's default config\n if params is None:\n params = OmegaConf.structured(getattr(encoder_cls, \"Config\", {}))\n\n return encoder_cls(params)\n\n\ndef build_processors(\n processors_config: DictConfig, registry_key: str = None, *args, **kwargs\n) -> ProcessorDict:\n \"\"\"Given a processor config, builds the processors present and returns back\n a dict containing processors mapped to keys as per the config\n\n Args:\n processors_config (omegaconf.DictConfig): OmegaConf DictConfig describing\n the parameters and type of 
each processor passed here\n\n registry_key (str, optional): If passed, function would look into registry for\n this particular key and return it back. .format with processor_key will\n be called on this string. Defaults to None.\n\n Returns:\n ProcessorDict: Dictionary containing key to\n processor mapping\n \"\"\"\n from mmf.datasets.processors.processors import Processor\n\n processor_dict = {}\n\n for processor_key, processor_params in processors_config.items():\n if not processor_params:\n continue\n\n processor_instance = None\n if registry_key is not None:\n full_key = registry_key.format(processor_key)\n processor_instance = registry.get(full_key, no_warning=True)\n\n if processor_instance is None:\n processor_instance = Processor(processor_params, *args, **kwargs)\n # We don't register back here as in case of hub interface, we\n # want the processors to be instantiate every time. BaseDataset\n # can register at its own end\n processor_dict[processor_key] = processor_instance\n\n return processor_dict\n\n\ndef build_iteration_strategy(\n config: DictConfig,\n dataloaders: Dict[str, torch.utils.data.DataLoader],\n *args,\n **kwargs,\n) -> IterationStrategy:\n if not config.get(\"enabled\", True):\n return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)\n else:\n assert (\n \"type\" in config\n ), \"multitasking config must define 'type' attribute if enabled\"\n # This assumes all dataloaders will have same dataset type\n iteration_strategy_class = registry.get_iteration_strategy_class(config.type)\n config = config.get(\"params\", {})\n dataset_type = dataloaders[list(dataloaders.keys())[0]].dataset.dataset_type\n if dataset_type != \"train\":\n logger.info(\n f\"{iteration_strategy_class.__name__} updated to size \"\n + f\"proportional for {dataset_type}\"\n )\n return SizeProportionalIterationStrategy.from_params(\n dataloaders, *args, **kwargs\n )\n else:\n return iteration_strategy_class(config, dataloaders, *args, **kwargs)\n\n\ndef build_meters(run_type: str) -> List[Meter]:\n train_meter, val_meter, test_meter = None, None, None\n if \"train\" in run_type:\n train_meter = Meter()\n # val_meter used for validation after training loop\n val_meter = Meter()\n elif \"val\" in run_type or \"inference\" in run_type:\n val_meter = Meter()\n\n if \"test\" in run_type:\n test_meter = Meter()\n\n return train_meter, val_meter, test_meter\n" ]
[ [ "torch.utils.data.DistributedSampler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
danielk333/SORTS
[ "f8454901fda405c0e8ce6553366553c3f043da0b", "f8454901fda405c0e8ce6553366553c3f043da0b" ]
[ "examples/observing_passes.py", "examples/e3d_demo_planner.py" ]
[ "#!/usr/bin/env python\n\n'''\nObserving a set of passes\n================================\n\n'''\nimport pathlib\nfrom tabulate import tabulate\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sorts\neiscat3d = sorts.radars.eiscat3d\nfrom sorts.controller import Tracker\nfrom sorts.scheduler import StaticList, ObservedParameters\nfrom sorts import SpaceObject\nfrom sorts.profiling import Profiler\n\nfrom sorts.propagator import SGP4\nProp_cls = SGP4\nProp_opts = dict(\n settings = dict(\n out_frame='ITRF',\n ),\n)\nprop = Prop_cls(**Prop_opts)\n\n\nobjs = [\n SpaceObject(\n Prop_cls,\n propagator_options = Prop_opts,\n a = 7200e3, \n e = 0.1, \n i = 75, \n raan = 79,\n aop = 0,\n mu0 = mu0,\n epoch = 53005.0,\n parameters = dict(\n d = 1.0,\n ),\n )\n for mu0 in [62.0, 61.9]\n]\n\nfor obj in objs: print(obj)\n\nt = sorts.equidistant_sampling(\n orbit = objs[0].state, \n start_t = 0, \n end_t = 3600*6, \n max_dpos=1e3,\n)\n\nprint(f'Temporal points: {len(t)}')\nstates0 = objs[0].get_state(t)\nstates1 = objs[1].get_state(t)\n\n#set cache_data = True to save the data in local coordinates \n#for each pass inside the Pass instance, setting to false saves RAM\npasses0 = eiscat3d.find_passes(t, states0, cache_data = False) \npasses1 = eiscat3d.find_passes(t, states1, cache_data = False)\n\n\n#just create a controller for observing 10 points of the first pass\nps = passes0[0][0][0]\nuse_inds = np.arange(0,len(ps.inds),len(ps.inds)//10)\ne3d_tracker = Tracker(radar = eiscat3d, t=t[ps.inds[use_inds]], ecefs=states0[:3,ps.inds[use_inds]])\ne3d_tracker.meta['target'] = 'Cool object 1'\n\nclass MyStaticList(StaticList, ObservedParameters):\n\n def __init__(self, radar, controllers, profiler=None, logger=None):\n super().__init__(\n radar=radar, \n controllers=controllers, \n profiler=profiler,\n logger=logger,\n )\n\n def generate_schedule(self, t, generator):\n data = np.empty((len(t),len(self.radar.rx)*2+1), dtype=np.float64)\n data[:,0] = t\n names = []\n targets = []\n for ind,mrad in enumerate(generator):\n radar, meta = mrad\n names.append(meta['controller_type'].__name__)\n targets.append(meta['target'])\n for ri, rx in enumerate(radar.rx):\n data[ind,1+ri*2] = rx.beam.azimuth\n data[ind,2+ri*2] = rx.beam.elevation\n data = data.T.tolist() + [names, targets]\n data = list(map(list, zip(*data)))\n return data\n\n\np = Profiler()\n\nscheduler = MyStaticList(radar = eiscat3d, controllers=[e3d_tracker], profiler=p)\n\nsched_data = scheduler.schedule()\n\nrx_head = [f'rx{i} {co}' for i in range(len(scheduler.radar.rx)) for co in ['az', 'el']]\nsched_tab = tabulate(sched_data, headers=[\"t [s]\"] + rx_head + ['Controller', 'Target'])\n\nprint(sched_tab)\n\np.start('total')\ndata0 = scheduler.observe_passes(passes0, space_object = objs[0], snr_limit=False)\np.stop('total')\nprint(p.fmt(normalize='total'))\n\ndata1 = scheduler.observe_passes(passes1, space_object = objs[1], snr_limit=False)\n\n#create a tdm file example\n# pth = pathlib.Path(__file__).parent / 'data' / 'test_tdm.tdm'\n# print(f'Writing TDM data to: {pth}')\n\n# dat = data0[0][0][0]\n# sorts.io.write_tdm(\n# pth,\n# dat['t'],\n# dat['range'],\n# dat['range_rate'],\n# np.ones(dat['range'].shape),\n# np.ones(dat['range_rate'].shape),\n# freq=eiscat3d.tx[0].beam.frequency,\n# tx_ecef=eiscat3d.tx[0].ecef,\n# rx_ecef=eiscat3d.rx[0].ecef,\n# tx_name=\"EISCAT 3D Skiboten\",\n# rx_name=\"EISCAT 3D Skiboten\",\n# oid=\"Some cool space object\",\n# tdm_type=\"track\",\n# )\n\n\nfig = plt.figure(figsize=(15,15))\naxes = [\n [\n 
fig.add_subplot(221, projection='3d'),\n fig.add_subplot(222),\n ],\n [\n fig.add_subplot(223),\n fig.add_subplot(224),\n ],\n]\n\nfor tx in scheduler.radar.tx:\n axes[0][0].plot([tx.ecef[0]],[tx.ecef[1]],[tx.ecef[2]], 'or')\nfor rx in scheduler.radar.rx:\n axes[0][0].plot([rx.ecef[0]],[rx.ecef[1]],[rx.ecef[2]], 'og')\n\nfor pi in range(len(passes0[0][0])):\n dat = data0[0][0][pi]\n dat2 = data1[0][0][pi]\n if dat is not None:\n axes[0][0].plot(states0[0,passes0[0][0][pi].inds], states0[1,passes0[0][0][pi].inds], states0[2,passes0[0][0][pi].inds], '-', label=f'pass-{pi}')\n axes[0][1].plot(dat['t']/3600.0, dat['range'], '-', label=f'pass-{pi}')\n axes[1][0].plot(dat['t']/3600.0, dat['range_rate'], '-', label=f'pass-{pi}')\n axes[1][1].plot(dat['t']/3600.0, 10*np.log10(dat['snr']), '-', label=f'pass-{pi}')\n if dat2 is not None:\n axes[0][0].plot(states1[0,passes1[0][0][pi].inds], states1[1,passes1[0][0][pi].inds], states1[2,passes1[0][0][pi].inds], '-', label=f'obj2 pass-{pi}')\n axes[0][1].plot(dat2['t']/3600.0, dat2['range'], '-', label=f'obj2 pass-{pi}')\n axes[1][0].plot(dat2['t']/3600.0, dat2['range_rate'], '-', label=f'obj2 pass-{pi}')\n axes[1][1].plot(dat2['t']/3600.0, 10*np.log10(dat2['snr']), '-', label=f'obj2 pass-{pi}')\n\naxes[0][1].legend()\nplt.show()", "#!/usr/bin/env python\n\n'''\nE3D Demonstrator SST planner\n================================\n\n'''\nimport pathlib\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tabulate import tabulate\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom astropy.time import Time, TimeDelta\n\nimport sorts\n\nfrom sorts.scheduler import Tracking, ObservedParameters\n\n\n# The Tracking scheduler takes in a series of SpaceObjects and finds all the passes of the objects \n# over the radar when \"update\" is called. Then \"set_measurements\", which is an abstract method,\n# is used to determine when along those passes measurements should be done\n# \n# ObservedParameters implements radar observation of space objects based on the radar schedule\n# and calculates the observed parameters (like range, range rate, RCS, SNR, ..)\n# and can be used in case we want to predict what data we will measure\n#\n# The generate_schedule is not implemented and needs to be defined to generate a schedule output\n# as the standard format for outputting a radar schedule in SORTS is to have a list of \"radar\"\n# instance with the exact configuration of the radar for each radar action\n#\nclass ObservedTracking(Tracking, ObservedParameters):\n \n def set_measurements(self):\n dw = self.controller_args['dwell']\n\n #we probably need to make sure we do not have overlapping measurements\n #this is a very \"stupid\" scheduler but we can do at least that!\n #So create a vector of all scheduled measurements\n t_all = []\n\n for ind, so in enumerate(self.space_objects):\n #This is a list of all passes times\n t_vec = []\n \n for txi in range(len(self.radar.tx)):\n for rxi in range(len(self.radar.rx)):\n for ps in self.passes[ind][txi][rxi]:\n #lets just measure it all! 
From rise to fall\n __t = np.arange(ps.start(), ps.end(), dw)\n\n #Check for overlap\n\n #to keep from this pass\n t_keep = np.full(__t.shape, True, dtype=np.bool)\n #to remove (index form) from all previous scheduled\n t_all_del = []\n\n #this creates a matrix of all possible time differences\n t_diff = np.array(t_all)[:,None] - __t[None,:]\n\n #find the ones that overlap with previously selected measurements\n inds = np.argwhere(np.logical_and(t_diff <= 0, t_diff >= -dw ))\n\n #just keep every other, so we are \"fair\"\n first_one = True\n for bad_samp in inds:\n if first_one:\n t_keep[bad_samp[1]] = False\n else:\n t_all_del.append(bad_samp[0])\n first_one = not first_one\n\n __t = __t[t_keep]\n\n #slow and ugly but does the job (filter away measurements)\n t_all = [t_all[x] for x in range(len(t_all)) if x not in t_all_del]\n\n t_vec += [__t]\n t_all += __t.tolist()\n\n if self.logger is not None:\n self.logger.info(f'Propagating {sum(len(t) for t in t_vec)} measurement states for object {ind}')\n\n #epoch difference\n dt = (self.space_objects[ind].epoch - self.epoch).to_value('sec')\n\n if self.collect_passes:\n t_vec = np.concatenate(t_vec)\n\n self.states[ind] = so.get_state(t_vec - dt)\n self.states_t[ind] = t\n else:\n self.states[ind] = [so.get_state(t - dt) for t in t_vec]\n self.states_t[ind] = t_vec\n\n def generate_schedule(self, t, generator, group_target=False):\n data = np.empty((len(t),len(self.radar.tx)*2+len(self.radar.rx)*3+1), dtype=np.float64)\n\n #here we get a time vector of radar events and the generator that gives the \"radar\" and meta data for that event\n #Use that to create a schedule table\n\n all_targets = dict()\n\n data[:,0] = t\n targets = []\n experiment = []\n passid = []\n for ind,mrad in enumerate(generator):\n radar, meta = mrad\n targets.append(meta['target'])\n \n if meta['target'] in all_targets:\n all_targets[meta['target']] += [ind]\n else:\n all_targets[meta['target']] = [ind]\n\n experiment.append('SST')\n passid.append(meta['pass'])\n\n for ti, tx in enumerate(radar.tx):\n data[ind,1+ti*2] = tx.beam.azimuth\n data[ind,2+ti*2] = tx.beam.elevation\n\n for ri, rx in enumerate(radar.rx):\n data[ind,len(radar.tx)*2+1+ri*3] = rx.beam.azimuth\n data[ind,len(radar.tx)*2+2+ri*3] = rx.beam.elevation\n data[ind,len(radar.tx)*2+3+ri*3] = rx.pointing_range*1e-3 #to km\n\n data = data.T.tolist() + [experiment, targets, passid]\n data = list(map(list, zip(*data)))\n\n if group_target:\n #Create a dict of tables instead\n data_ = dict()\n for key in all_targets:\n for ind in all_targets[key]:\n if key in data_:\n data_[key] += [data[ind]]\n else:\n data_[key] = [data[ind]]\n else:\n data_ = data\n\n return data_\n\n\n\n######## RUNNING ########\n\nfrom sorts.population import tle_catalog\n\ne3d_demo = sorts.radars.eiscat3d_demonstrator_interp\n#############\n# CHOOSE OBJECTS\n#############\n\nobjects = [ #NORAD ID\n 27386, #Envisat\n 35227,\n 35245,\n]\nepoch = Time('2020-09-08 00:24:51.759', format='iso', scale='utc')\nt_start = 0.0\nt_end = 12.0*3600.0 #end time of tracking scheduling\nt_step = 10.0 #time step for finding passes\ndwell = 10.0 #the time between re-pointing beam, i.e. 
\"radar actions\" or \"time slices\"\n\nprofiler = sorts.profiling.Profiler()\nlogger = sorts.profiling.get_logger()\n\ntry:\n pth = pathlib.Path(__file__).parent / 'data' / 'space_track_tle.txt'\nexcept NameError:\n import os\n pth = 'data' + os.path.sep + 'space_track_tle.txt'\n\npop = tle_catalog(pth, kepler=True)\n\npop.propagator_options['settings']['out_frame'] = 'ITRS' #output states in ECEF\n\n#Get the space objects to track\nspace_objects = []\nfor obj in objects:\n ind = np.argwhere(pop.data['oid'] == obj)\n if len(ind) > 0:\n space_objects.append(pop.get_object(ind[0]))\n\nlogger.always(f'Found {len(space_objects)} objects to track')\n\n#Initialize the scheduler\nscheduler = ObservedTracking(\n radar = e3d_demo, \n epoch = epoch,\n space_objects = space_objects, \n end_time = t_end, \n start_time = t_start, \n controller_args = dict(return_copy=True, dwell=dwell),\n max_dpos = 1e3,\n profiler = profiler, \n logger = logger,\n use_pass_states = False,\n)\n\n#update the passes\nscheduler.update()\n\n#set the measurements using the current passes\nscheduler.set_measurements()\n\n#Generate the schedule, grouped by target\ngrouped_data = scheduler.schedule(group_target=True)\n\nfor key in grouped_data:\n pass_id = np.array([x[-1] for x in grouped_data[key]], dtype=np.int) #pass index is last variable\n passes = np.unique(pass_id)\n\n tv = np.array([x[0] for x in grouped_data[key]]) #we put t at index 0\n az = np.array([x[1] for x in grouped_data[key]]) #we put az at index 1\n el = np.array([x[2] for x in grouped_data[key]]) #and el at index 2\n\n fig, ax = plt.subplots(1,1)\n for pi, ps in enumerate(passes):\n ax = sorts.plotting.local_tracking(\n az[pass_id == ps], \n el[pass_id == ps], \n ax=ax, \n t=epoch + TimeDelta(tv, format='sec'),\n add_track = pi > 0, #if there are more then one, dont redraw all the extra, just add the track\n )\n ax.set_title(key)\n\n#Generate a combined schedule\ndata = scheduler.schedule()\n\n#Format and print schedule\nrx_head = [f'TX{i}-{co}' for i in range(len(e3d_demo.tx)) for co in ['az [deg]', 'el [deg]']]\nrx_head += [f'RX{i}-{co}' for i in range(len(e3d_demo.rx)) for co in ['az [deg]', 'el [deg]', 'r [km]']]\nsched_tab = tabulate(data, headers=[\"t [s]\"] + rx_head + ['Experiment', 'Target', 'Pass'])\n\nprint(sched_tab)\n\n#Plot radar pointing diagram\nfig = plt.figure(figsize=(15,15))\nax = fig.add_subplot(211)\nax.plot([x[0]/3600.0 for x in data], [x[1] for x in data], \".b\")\nax.set_xlabel('Time [h]')\nax.set_ylabel('TX Azimuth [deg]')\n\nax = fig.add_subplot(212)\nax.plot([x[0]/3600.0 for x in data], [x[2] for x in data], \".b\")\nax.set_xlabel('Time [h]')\nax.set_ylabel('TX Elevation [deg]')\n\nplt.show()\n" ]
[ [ "numpy.log10", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.logical_and", "numpy.unique", "matplotlib.pyplot.subplots", "numpy.argwhere", "numpy.concatenate", "numpy.full", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bwohlberg/xdesign
[ "b40a50e596d54501106dbc8052ae05a5ae47bfdb", "b40a50e596d54501106dbc8052ae05a5ae47bfdb", "b40a50e596d54501106dbc8052ae05a5ae47bfdb" ]
[ "src/xdesign/geometry/area.py", "src/xdesign/geometry/line.py", "tests/test_material.py" ]
[ "\"\"\"Define two dimensional geometric entities.\"\"\"\n\n__author__ = \"Daniel Ching, Doga Gursoy\"\n__copyright__ = \"Copyright (c) 2016, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = [\n 'Curve',\n 'Circle',\n 'Polygon',\n 'RegularPolygon',\n 'Triangle',\n 'Rectangle',\n 'Square',\n 'Mesh',\n]\n\nfrom copy import deepcopy\nimport logging\nimport warnings\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom cached_property import cached_property\n\nfrom xdesign.geometry.entity import *\nfrom xdesign.geometry.line import *\nfrom xdesign.geometry.point import *\n\nlogger = logging.getLogger(__name__)\n\n\nclass Curve(Entity):\n \"\"\"The base class for closed manifolds defined by a single equation. e.g.\n :class:`.Circle`, :class:`.Sphere`, or :class:`.Torus`.\n\n Attributes\n ----------\n center : Point\n \"\"\"\n\n def __init__(self, center):\n if not isinstance(center, Point):\n raise TypeError(\"center must be a Point.\")\n super(Curve, self).__init__()\n self.center = center\n\n def __repr__(self):\n return \"{}(center={})\".format(type(self).__name__, repr(self.center))\n\n def translate(self, vector):\n \"\"\"Translates the Curve along a vector.\"\"\"\n if not isinstance(vector, (Point, list, np.array)):\n raise TypeError(\"vector must be point, list, or array.\")\n self.center.translate(vector)\n\n def rotate(self, theta, point=None, axis=None):\n \"\"\"Rotates the Curve by theta radians around an axis which passes\n through a point radians.\"\"\"\n self.center.rotate(theta, point, axis)\n\n\nclass Superellipse(Curve):\n \"\"\"A Superellipse in 2D cartesian space.\n\n Attributes\n ----------\n center : Point\n a : scalar\n b : scalar\n n : scalar\n \"\"\"\n\n def __init__(self, center, a, b, n):\n super(Superellipse, self).__init__(center)\n self.a = float(a)\n self.b = float(b)\n self.n = float(n)\n\n def __repr__(self):\n return \"Superellipse(center={}, a={}, b={}, n={})\".format(\n repr(self.center), repr(self.a), repr(self.b), repr(self.n)\n )\n\n @property\n def list(self):\n \"\"\"Return list representation.\"\"\"\n return [self.center.x, self.center.y, self.a, self.b, self.n]\n\n def scale(self, val):\n \"\"\"Scale.\"\"\"\n self.a *= val\n self.b *= val\n\n\nclass Ellipse(Superellipse):\n \"\"\"Ellipse in 2-D cartesian space.\n\n Attributes\n ----------\n center : Point\n a : scalar\n b : scalar\n \"\"\"\n\n def __init__(self, center, a, b):\n super(Ellipse, self).__init__(center, a, b, 2)\n\n def __repr__(self):\n return \"Ellipse(center={}, a={}, b={})\".format(\n repr(self.center), repr(self.a), repr(self.b)\n )\n\n @property\n def list(self):\n \"\"\"Return list representation.\"\"\"\n return [self.center.x, self.center.y, self.a, self.b]\n\n @property\n def area(self):\n \"\"\"Return area.\"\"\"\n return np.pi * self.a * self.b\n\n def scale(self, val):\n \"\"\"Scale.\"\"\"\n self.a *= val\n self.b *= val\n\n\nclass Circle(Curve):\n \"\"\"Circle in 2D cartesian space.\n\n Attributes\n ----------\n center : Point\n The center point of the circle.\n radius : scalar\n The radius of the circle.\n sign : int (-1 or 1)\n The sign of the area\n \"\"\"\n\n def __init__(self, center, radius, sign=1):\n super(Circle, self).__init__(center)\n self.radius = float(radius)\n self.sign = sign\n self._dim = 2\n\n def __repr__(self):\n return \"Circle(center={}, radius={}, sign={})\".format(\n repr(self.center), repr(self.radius), repr(self.sign)\n )\n\n def __str__(self):\n \"\"\"Return the analytical 
equation.\"\"\"\n return \"(x-%s)^2 + (y-%s)^2 = %s^2\" % (\n self.center.x, self.center.y, self.radius\n )\n\n def __eq__(self, circle):\n return ((self.x, self.y,\n self.radius) == (circle.x, circle.y, circle.radius))\n\n def __neg__(self):\n copE = deepcopy(self)\n copE.sign = -copE.sign\n return copE\n\n @property\n def list(self):\n \"\"\"Return list representation for saving to files.\"\"\"\n return [self.center.x, self.center.y, self.radius]\n\n @property\n def circumference(self):\n \"\"\"Returns the circumference.\"\"\"\n return 2 * np.pi * self.radius\n\n @property\n def diameter(self):\n \"\"\"Returns the diameter.\"\"\"\n return 2 * self.radius\n\n @property\n def area(self):\n \"\"\"Return the area.\"\"\"\n return self.sign * np.pi * self.radius**2\n\n @property\n def patch(self):\n \"\"\"Returns a matplotlib patch.\"\"\"\n return plt.Circle((self.center.y, self.center.x), self.radius)\n\n @property\n def bounding_box(self):\n \"\"\"Return the axis-aligned bounding box as two numpy vectors.\"\"\"\n xmin = np.array(self.center._x - self.radius)\n xmax = np.array(self.center._x + self.radius)\n\n return xmin, xmax\n\n # def scale(self, val):\n # \"\"\"Scale.\"\"\"\n # raise NotImplementedError\n # self.center.scale(val)\n # self.rad *= val\n\n def contains(self, other):\n \"\"\"Return whether `other` is a proper subset.\n\n Return one boolean for all geometric entities. Return an array of\n boolean for array input.\n \"\"\"\n if isinstance(other, Point):\n x = other._x\n elif isinstance(other, np.ndarray):\n x = other\n elif isinstance(other, Mesh):\n for face in other.faces:\n if not self.contains(face) and face.sign == 1:\n return False\n return True\n else:\n if self.sign == 1:\n if other.sign == -1:\n # Closed shape cannot contain infinite one\n return False\n else:\n assert other.sign == 1\n # other is within A\n if isinstance(other, Circle):\n return (\n other.center.distance(self.center) + other.radius <\n self.radius\n )\n elif isinstance(other, Polygon):\n x = _points_to_array(other.vertices)\n return np.all(self.contains(x))\n\n elif self.sign == -1:\n if other.sign == 1:\n # other is outside A and not around\n if isinstance(other, Circle):\n return (\n other.center.distance(self.center) - other.radius >\n self.radius\n )\n elif isinstance(other, Polygon):\n x = _points_to_array(other.vertices)\n return (\n np.all(self.contains(x))\n and not other.contains(-self)\n )\n\n else:\n assert other.sign is -1\n # other is around A\n if isinstance(other, Circle):\n return (\n other.center.distance(self.center) + self.radius <\n other.radius\n )\n elif isinstance(other, Polygon):\n return (-other).contains(-self)\n\n x = np.atleast_2d(x)\n\n if self.sign == 1:\n return np.sum((x - self.center._x)**2, axis=1) < self.radius**2\n else:\n return np.sum((x - self.center._x)**2, axis=1) > self.radius**2\n\n\ndef _points_to_array(points):\n a = np.zeros((len(points), points[0].dim))\n\n for i in range(len(points)):\n a[i] = points[i]._x\n\n return np.atleast_2d(a)\n\n\nclass Polygon(Entity):\n \"\"\"A convex polygon in 2D cartesian space.\n\n It is defined by a number of distinct vertices of class :class:`.Point`.\n Superclasses include :class:`.Square`, :class:`.Triangle`, etc.\n\n Attributes\n ----------\n vertices : List of Points\n sign : int (-1 or 1)\n The sign of the area\n\n Raises\n ------\n ValueError : If the number of vertices is less than three.\n \"\"\"\n\n def __init__(self, vertices, sign=1):\n for v in vertices:\n if not isinstance(v, Point):\n raise 
TypeError(\"vertices must be of type Point.\")\n if len(vertices) < 3:\n raise ValueError(\"A Polygon has at least three vertices.\")\n super(Polygon, self).__init__()\n self.vertices = vertices\n self._dim = vertices[0].dim\n self.sign = sign\n\n def __repr__(self):\n return \"Polygon(vertices={}, sign={})\".format(\n repr(self.vertices), repr(self.sign)\n )\n\n def __str__(self):\n return \"{}({})\".format(type(self).__name__, str(self.numpy))\n\n def __neg__(self):\n copE = deepcopy(self)\n copE.sign = -copE.sign\n return copE\n\n @property\n def numverts(self):\n return len(self.vertices)\n\n @property\n def list(self):\n \"\"\"Return list representation.\"\"\"\n lst = []\n for m in range(self.numverts):\n lst.append(self.vertices[m].list)\n return lst\n\n @property\n def numpy(self):\n \"\"\"Return Numpy representation.\"\"\"\n return _points_to_array(self.vertices)\n\n @property\n def patch(self):\n \"\"\"Returns a matplotlib patch.\"\"\"\n points = self.vertices\n a = np.zeros((len(points), points[0].dim))\n for i in range(len(points)):\n a[i] = np.flip(points[i]._x, 0)\n return plt.Polygon(a)\n\n # Cached Properties\n @property\n def bounds(self):\n \"\"\"Returns a 4-tuple (xmin, ymin, xmax, ymax) representing the\n bounding rectangle for the Polygon.\n \"\"\"\n warnings.warn(\n \"Polygon.bounds is deprecated; use Polygon.bounding_box instead.\",\n DeprecationWarning)\n xs = [p.x for p in self.vertices]\n ys = [p.y for p in self.vertices]\n return (min(xs), min(ys), max(xs), max(ys))\n\n @property\n def bounding_box(self):\n \"\"\"Return the axis-aligned bounding box as two numpy vectors.\"\"\"\n xs = [p.x for p in self.vertices]\n ys = [p.y for p in self.vertices]\n return np.array([min(xs), min(ys)]), np.array([max(xs), max(ys)])\n\n @property\n def edges(self):\n \"\"\"Return a list of lines connecting the points of the Polygon.\"\"\"\n edges = []\n\n for i in range(self.numverts):\n edges.append(\n Segment(\n self.vertices[i], self.vertices[(i + 1) % self.numverts]\n )\n )\n\n return edges\n\n @cached_property\n def area(self):\n \"\"\"Return the area of the Polygon.\n\n References\n ----------\n https://en.wikipedia.org/wiki/Shoelace_formula\n https://stackoverflow.com/a/30408825\n \"\"\"\n a = _points_to_array(self.vertices)\n x = a[:, 0]\n y = a[:, 1]\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n @cached_property\n def perimeter(self):\n \"\"\"Return the perimeter of the Polygon.\"\"\"\n perimeter = 0\n verts = self.vertices\n points = verts + [verts[0]]\n for m in range(self.numverts):\n perimeter += points[m].distance(points[m + 1])\n return perimeter\n\n @cached_property\n def center(self):\n \"\"\"The center of the bounding circle.\"\"\"\n center = Point(np.zeros(self._dim))\n for v in self.vertices:\n center += v\n return center / self.numverts\n\n @cached_property\n def radius(self):\n \"\"\"The radius of the bounding circle.\"\"\"\n r = 0\n c = self.center\n for m in range(self.numverts):\n r = max(r, self.vertices[m].distance(c))\n return r\n\n @cached_property\n def half_space(self):\n \"\"\"Returns the half space polytope respresentation of the polygon.\"\"\"\n assert (self.dim > 0), self.dim\n A = np.ndarray((self.numverts, self.dim))\n B = np.ndarray(self.numverts)\n\n for i in range(0, self.numverts):\n edge = Line(\n self.vertices[i], self.vertices[(i + 1) % self.numverts]\n )\n A[i, :], B[i] = edge.standard\n\n # test for positive or negative side of line\n if self.center._x.dot(A[i, :]) > B[i]:\n A[i, :] = -A[i, :]\n B[i] = 
-B[i]\n\n return A, B\n\n # Methods\n def translate(self, vector):\n \"\"\"Translates the polygon by a vector.\"\"\"\n for v in self.vertices:\n v.translate(vector)\n\n if 'center' in self.__dict__:\n self.center.translate(vector)\n\n # if 'bounds' in self.__dict__:\n # self.bounds.translate(vector)\n\n if 'half_space' in self.__dict__:\n self.half_space = self.half_space.translation(vector)\n\n def rotate(self, theta, point=None, axis=None):\n \"\"\"Rotates the Polygon around an axis which passes through a point by\n theta radians.\"\"\"\n for v in self.vertices:\n v.rotate(theta, point, axis)\n\n if 'center' in self.__dict__:\n self.center.rotate(theta, point, axis)\n\n # if 'bounds' in self.__dict__:\n # self.bounds.rotate(theta, point, axis)\n\n if 'half_space' in self.__dict__:\n if point is None:\n d = 0\n else:\n d = point._x\n self.half_space = self.half_space.translation(-d)\n self.half_space = self.half_space.rotation(0, 1, theta)\n self.half_space = self.half_space.translation(d)\n\n def contains(self, other):\n \"\"\"Return whether this Polygon contains the other.\"\"\"\n\n if isinstance(other, Point):\n x = other._x\n elif isinstance(other, np.ndarray):\n x = other\n elif isinstance(other, Mesh):\n for face in other.faces:\n if not self.contains(face) and face.sign == 1:\n return False\n return True\n else:\n if self.sign == 1:\n if other.sign == -1:\n # Closed shape cannot contain infinite one\n return False\n else:\n assert other.sign == 1\n # other is within A\n if isinstance(other, Circle):\n if self.contains(other.center):\n for edge in self.edges:\n if other.center.distance(edge) < other.radius:\n return False\n return True\n return False\n elif isinstance(other, Polygon):\n x = _points_to_array(other.vertices)\n return np.all(self.contains(x))\n\n elif self.sign == -1:\n if other.sign == 1:\n # other is outside A and not around\n if isinstance(other, Circle):\n if self.contains(other.center):\n for edge in self.edges:\n if other.center.distance(edge) < other.radius:\n return False\n return True and not other.contains(-self)\n return False\n elif isinstance(other, Polygon):\n x = _points_to_array(other.vertices)\n return (\n np.all(self.contains(x))\n and not other.contains(-self)\n )\n\n else:\n assert other.sign is -1\n # other is around A\n if isinstance(other, Circle) or isinstance(other, Polygon):\n return (-other).contains(-self)\n\n border = Path(self.numpy)\n\n if self.sign == 1:\n return border.contains_points(np.atleast_2d(x))\n else:\n return np.logical_not(border.contains_points(np.atleast_2d(x)))\n\n\nclass RegularPolygon(Polygon):\n \"\"\"A regular polygon in 2D cartesian space.\n\n It is defined by the polynomial center, order, and radius.\n\n By default (i.e. when the ``angle`` parameter is zero), the regular\n polygon is oriented so that one of the vertices is at coordinates\n :math:`(x + r, x)` where :math:`x` is the x-coordinate of\n ``center`` and :math:`r` = ``radius``. The ``angle`` parameter is\n only meaningful modulo :math:`2\\pi /` ``order`` since rotation by\n :math:`2\\pi /` ``order`` gives a result equivalent to no rotation.\n\n Parameters\n ----------\n center : :class:`Point`\n The center of the polygon\n radius : float\n Distance from polygon center to vertices\n order : int\n Order of the polygon (e.g. 
order 6 is a hexagon).\n angle : float\n Optional rotation angle in radians.\n sign : int (-1 or 1)\n Optional sign of the area (see :class:`Polygon`)\n \"\"\"\n\n def __init__(self, center, radius, order, angle=0, sign=1):\n vertex_angles = (np.linspace(0, 2 * np.pi, order, endpoint=False) +\n angle)\n vertices = [\n Point([radius * np.cos(theta), radius * np.sin(theta)]) + center\n for theta in vertex_angles\n ]\n super(RegularPolygon, self).__init__(vertices, sign=sign)\n\n\nclass Triangle(Polygon):\n \"\"\"Triangle in 2D cartesian space.\n\n It is defined by three distinct points.\n \"\"\"\n\n def __init__(self, p1, p2, p3):\n super(Triangle, self).__init__([p1, p2, p3])\n\n def __repr__(self):\n return \"Triangle({}, {}, {})\".format(\n self.vertices[0], self.vertices[1], self.vertices[2]\n )\n\n @cached_property\n def center(self):\n center = Point([0, 0])\n for v in self.vertices:\n center += v\n return center / 3\n\n @cached_property\n def area(self):\n A = self.vertices[0] - self.vertices[1]\n B = self.vertices[0] - self.vertices[2]\n return self.sign * 1 / 2 * np.abs(np.cross([A.x, A.y], [B.x, B.y]))\n\n\nclass Rectangle(Polygon):\n \"\"\"Rectangle in 2D cartesian space.\n\n Defined by a point and a vector to enforce perpendicular sides.\n\n Parameters\n ----------\n side_lengths : array\n The lengths of the sides\n \"\"\"\n\n def __init__(self, center, side_lengths):\n\n s = np.array(side_lengths) / 2\n self.side_lengths = np.array(side_lengths)\n\n p1 = Point([center.x + s[0], center.y + s[1]])\n p2 = Point([center.x - s[0], center.y + s[1]])\n p3 = Point([center.x - s[0], center.y - s[1]])\n p4 = Point([center.x + s[0], center.y - s[1]])\n\n super(Rectangle, self).__init__([p1, p2, p3, p4])\n\n def __repr__(self):\n return \"Rectangle({}, {})\".format(\n repr(self.center), repr(self.side_lengths.tolist())\n )\n\n @cached_property\n def area(self):\n return self.sign * (\n self.vertices[0].distance(self.vertices[1]) *\n self.vertices[1].distance(self.vertices[2])\n )\n\n\nclass Square(Rectangle):\n \"\"\"Square in 2D cartesian space.\n\n Defined by a point and a length to enforce perpendicular sides.\n \"\"\"\n\n def __init__(self, center, side_length=None, radius=None):\n\n if radius is not None:\n # side_length = np.sqrt(2) * radius\n side_length = 2 * radius\n\n side_lengths = [side_length] * 2\n\n super(Square, self).__init__(center, side_lengths)\n\n\nclass Mesh(Entity):\n \"\"\"A collection of Entities\n\n Attributes\n ----------\n faces : :py:obj:`list`\n A list of the Entities\n area : float\n The total area of the Entities\n population : int\n The number entities in the Mesh\n radius : float\n The radius of a bounding circle\n\n \"\"\"\n\n def __init__(self, obj=None, faces=[]):\n self.faces = []\n self.area = 0\n self.population = 0\n self.radius = 0\n self._dim = 2\n\n if obj is not None:\n assert not faces\n self.import_triangle(obj)\n else:\n assert obj is None\n for face in faces:\n self.append(face)\n\n def __str__(self):\n return \"Mesh(\" + str(self.center) + \")\"\n\n def __repr__(self):\n return \"Mesh(faces={})\".format(repr(self.faces))\n\n def import_triangle(self, obj):\n \"\"\"Loads mesh data from a Python Triangle dict.\n \"\"\"\n for face in obj['triangles']:\n p0 = Point(obj['vertices'][face[0], 0], obj['vertices'][face[0], 1])\n p1 = Point(obj['vertices'][face[1], 0], obj['vertices'][face[1], 1])\n p2 = Point(obj['vertices'][face[2], 0], obj['vertices'][face[2], 1])\n t = Triangle(p0, p1, p2)\n self.append(t)\n\n @property\n def center(self):\n 
center = Point([0, 0])\n if self.area > 0:\n for f in self.faces:\n center += f.center * f.area\n center /= self.area\n return center\n\n @property\n def bounding_box(self):\n \"\"\"Return the axis-aligned bounding box as two numpy vectors.\"\"\"\n xmin = np.full(self.dim, np.nan)\n xmax = np.full(self.dim, np.nan)\n\n for f in self.faces:\n fmin, fmax = f.bounding_box\n with np.errstate(invalid='ignore'):\n xmin = np.fmin(xmin, fmin)\n xmax = np.fmax(xmax, fmax)\n\n return xmin, xmax\n\n def append(self, t):\n \"\"\"Add a triangle to the mesh.\"\"\"\n self.population += 1\n # self.center = ((self.center * self.area + t.center * t.area) /\n # (self.area + t.area))\n self.area += t.area\n\n if isinstance(t, Polygon):\n for v in t.vertices:\n self.radius = max(self.radius, self.center.distance(v))\n else:\n self.radius = max(\n self.radius,\n self.center.distance(t.center) + t.radius\n )\n\n self.faces.append(t)\n\n def pop(self, i=-1):\n \"\"\"Pop i-th triangle from the mesh.\"\"\"\n self.population -= 1\n self.area -= self.faces[i].area\n try:\n del self.__dict__['center']\n except KeyError:\n pass\n return self.faces.pop(i)\n\n def translate(self, vector):\n \"\"\"Translate entity.\"\"\"\n for t in self.faces:\n t.translate(vector)\n\n def rotate(self, theta, point=None, axis=None):\n \"\"\"Rotate entity around an axis which passes through a point by theta\n radians.\"\"\"\n for t in self.faces:\n t.rotate(theta, point, axis)\n\n def scale(self, vector):\n \"\"\"Scale entity.\"\"\"\n for t in self.faces:\n t.scale(vector)\n\n def contains(self, other):\n \"\"\"Return whether this Mesh contains other.\n\n FOR ALL `x`,\n THERE EXISTS a face of the Mesh that contains `x`\n AND (ALL cut outs that contain `x` or THERE DOES NOT EXIST a cut out).\n \"\"\"\n if isinstance(other, Point):\n x = other._x\n elif isinstance(other, np.ndarray):\n x = other\n elif isinstance(other, Polygon):\n x = _points_to_array(other.vertices)\n return np.all(self.contains(x))\n elif isinstance(other, Circle):\n warnings.warn(\"Didn't check that Mesh contains Circle.\")\n return True\n else:\n raise NotImplementedError(\"Mesh.contains({})\".format(type(other)))\n\n x = np.atleast_2d(x)\n\n # keep track of whether each point is contained in a face\n x_in_face = np.zeros(x.shape[0], dtype=bool)\n x_in_cut = np.zeros(x.shape[0], dtype=bool)\n has_cuts = False\n\n for f in self.faces:\n if f.sign < 0:\n has_cuts = True\n x_in_cut = np.logical_or(x_in_cut, f.contains(x))\n else:\n x_in_face = np.logical_or(x_in_face, f.contains(x))\n\n if has_cuts:\n return np.logical_and(x_in_face, x_in_cut)\n else:\n return x_in_face\n\n @property\n def patch(self):\n patches = []\n for f in self.faces:\n patches.append(f.patch)\n return patches\n", "\"\"\"Define one dimensional geometric entities.\"\"\"\n\n__author__ = \"Daniel Ching, Doga Gursoy\"\n__copyright__ = \"Copyright (c) 2016, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = [\n 'Line',\n 'Segment',\n]\n\nimport logging\nfrom math import sqrt\nimport numpy as np\n\nfrom xdesign.geometry.entity import *\nfrom xdesign.geometry.point import *\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinearEntity(Entity):\n \"\"\"Define a base class for linear entities.\n\n e.g. 
:class:`.Line`, :class:`.Segment`, and :class:`.Ray`.\n\n The constructor takes two unique :class:`.Point`.\n\n Attributes\n ----------\n p1 : Point\n p2 : Point\n\n \"\"\"\n\n def __init__(self, p1, p2):\n if not isinstance(p1, Point) or not isinstance(p2, Point):\n raise TypeError(\"p1 and p2 must be Points\")\n if p1 == p2:\n raise ValueError('Requires two unique Points.')\n if p1.dim != p2.dim:\n raise ValueError('Two Points must have same dimensionality.')\n self.p1 = p1\n self.p2 = p2\n self._dim = p1.dim\n\n def __repr__(self):\n return \"{}({}, {})\".format(\n type(self).__name__, repr(self.p1), repr(self.p2)\n )\n\n @property\n def vertical(self):\n \"\"\"Return True if line is vertical.\"\"\"\n return self.p1.x == self.p2.x\n\n @property\n def horizontal(self):\n \"\"\"Return True if line is horizontal.\"\"\"\n return self.p1.y == self.p2.y\n\n @property\n def slope(self):\n \"\"\"Return the slope of the line.\"\"\"\n if self.vertical:\n return np.inf\n else:\n return ((self.p2.y - self.p1.y) / (self.p2.x - self.p1.x))\n\n @property\n def points(self):\n \"\"\"Return the 2-tuple of points defining this linear entity.\"\"\"\n return (self.p1, self.p2)\n\n @property\n def length(self):\n \"\"\"Return the length of the segment between p1 and p2.\"\"\"\n return self.p1.distance(self.p2)\n\n @property\n def tangent(self):\n \"\"\"Return the unit tangent vector.\"\"\"\n dx = (self.p2._x - self.p1._x) / self.length\n return Point(dx)\n\n @property\n def normal(self):\n \"\"\"Return the unit normal vector.\"\"\"\n dx = (self.p2._x - self.p1._x) / self.length\n R = np.array([[0, 1], [-1, 0]])\n n = np.dot(R, dx)\n return Point(n)\n\n @property\n def numpy(self):\n \"\"\"Return row-size numpy array of p1 and p2.\"\"\"\n return np.stack((self.p1._x, self.p2._x), axis=0)\n\n @property\n def list(self):\n \"\"\"Return an list of coordinates where p1 is the first D coordinates\n and p2 is the next D coordinates.\"\"\"\n return np.concatenate((self.p1._x, self.p2._x), axis=0)\n\n def translate(self, vector):\n \"\"\"Translate the :class:`.LinearEntity` by the given vector.\"\"\"\n self.p1.translate(vector)\n self.p2.translate(vector)\n\n def rotate(self, theta, point=None, axis=None):\n \"\"\"Rotate the :class:`.LinearEntity` by theta radians around an axis\n defined by an axis and a point.\"\"\"\n self.p1.rotate(theta, point, axis)\n self.p2.rotate(theta, point, axis)\n\n\nclass Line(LinearEntity):\n \"\"\"Line in 2D cartesian space.\n\n The constructor takes two unique :class:`.Point`.\n\n Attributes\n ----------\n p1 : Point\n p2 : Point\n \"\"\"\n\n def __init__(self, p1, p2):\n super(Line, self).__init__(p1, p2)\n\n def __str__(self):\n \"\"\"Return line equation.\"\"\"\n if self.vertical:\n return \"x = %s\" % self.p1.x\n elif self.dim == 2:\n return \"y = %sx + %s\" % (self.slope, self.yintercept)\n else:\n A, B = self.standard\n return \"%sx \" % '+ '.join([str(n) for n in A]) + \"= \" + str(B)\n\n def __eq__(self, line):\n return (self.slope, self.yintercept) == (line.slope, line.yintercept)\n\n def intercept(self, n):\n \"\"\"Calculates the intercept for the nth dimension.\"\"\"\n if n > self._dim:\n return 0\n else:\n A, B = self.standard\n if A[n] == 0:\n return np.inf\n else:\n return B / A[n]\n\n @property\n def xintercept(self):\n \"\"\"Return the x-intercept.\"\"\"\n if self.horizontal:\n return np.inf\n else:\n return self.p1.x - 1 / self.slope * self.p1.y\n\n @property\n def yintercept(self):\n \"\"\"Return the y-intercept.\"\"\"\n if self.vertical:\n return np.inf\n else:\n 
return self.p1.y - self.slope * self.p1.x\n\n @property\n def standard(self):\n \"\"\"Returns coeffients for the first N-1 standard equation coefficients.\n The Nth is returned separately.\"\"\"\n A = np.stack([self.p1._x, self.p2._x], axis=0)\n return calc_standard(A)\n\n def distance(self, other):\n \"\"\"Returns the closest distance between entities.\"\"\"\n # REF: http://geomalgorithms.com/a02-_lines.html\n if not isinstance(other, Point):\n raise NotImplementedError(\"Line to point distance only.\")\n d = np.cross(self.tangent._x, other._x - self.p1._x)\n if self.dim > 2:\n return sqrt(d.dot(d))\n else:\n return abs(d)\n\n\nclass Ray(Line):\n \"\"\"Ray in 2-D cartesian space.\n\n It is defined by two distinct points.\n\n Attributes\n ----------\n p1 : Point (source)\n p2 : Point (point direction)\n \"\"\"\n\n def __init__(self, p1, p2):\n super(Ray, self).__init__(p1, p2)\n\n @property\n def source(self):\n \"\"\"The point from which the ray emanates.\"\"\"\n return self.p1\n\n @property\n def direction(self):\n \"\"\"The direction in which the ray emanates.\"\"\"\n return self.p2 - self.p1\n\n def distance(self, other):\n # REF: http://geomalgorithms.com/a02-_lines.html\n v = self.p2._x - self.p1._x\n w = other._x - self.p1._x\n\n c1 = np.dot(w, v)\n\n if c1 <= 0:\n return self.p1.distance(other)\n else:\n return super(Ray, self).distance(other)\n\n\nclass Segment(Line):\n \"\"\"Defines a finite line segment from two unique points.\"\"\"\n\n def __init__(self, p1, p2):\n super(Segment, self).__init__(p1, p2)\n\n @property\n def midpoint(self):\n \"\"\"Return the midpoint of the line segment.\"\"\"\n return Point.midpoint(self.p1, self.p2)\n\n def distance(self, other):\n \"\"\"Return the distance to the other.\"\"\"\n # REF: http://geomalgorithms.com/a02-_lines.html\n v = self.p2._x - self.p1._x\n w = other._x - self.p1._x\n\n c1 = np.dot(w, v)\n c2 = np.dot(v, v)\n\n if c1 <= 0:\n return self.p1.distance(other)\n elif c2 <= c1:\n return self.p2.distance(other)\n else:\n return super(Segment, self).distance(other)\n", "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# #########################################################################\r\n# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #\r\n# #\r\n# Copyright 2015. UChicago Argonne, LLC. This software was produced #\r\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\r\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\r\n# U.S. Department of Energy. The U.S. Government has rights to use, #\r\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\r\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\r\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\r\n# modified to produce derivative works, such modified software should #\r\n# be clearly marked, so as not to confuse it with the version available #\r\n# from ANL. #\r\n# #\r\n# Additionally, redistribution and use in source and binary forms, with #\r\n# or without modification, are permitted provided that the following #\r\n# conditions are met: #\r\n# #\r\n# * Redistributions of source code must retain the above copyright #\r\n# notice, this list of conditions and the following disclaimer. #\r\n# #\r\n# * Redistributions in binary form must reproduce the above copyright #\r\n# notice, this list of conditions and the following disclaimer in #\r\n# the documentation and/or other materials provided with the #\r\n# distribution. 
#\r\n# #\r\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\r\n# Laboratory, ANL, the U.S. Government, nor the names of its #\r\n# contributors may be used to endorse or promote products derived #\r\n# from this software without specific prior written permission. #\r\n# #\r\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\r\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\r\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\r\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\r\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\r\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\r\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\r\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\r\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\r\n# POSSIBILITY OF SUCH DAMAGE. #\r\n# #########################################################################\r\n\r\nimport numpy as np\r\nfrom numpy.testing import assert_allclose, assert_raises, assert_equal\r\nimport matplotlib.pyplot as plt\r\nimport warnings\r\nimport os.path\r\n\r\nfrom xdesign.phantom import *\r\nfrom xdesign.material import *\r\nfrom xdesign.plot import *\r\n\r\n__author__ = \"Daniel Ching\"\r\n__copyright__ = \"Copyright (c) 2016, UChicago Argonne, LLC.\"\r\n__docformat__ = 'restructuredtext en'\r\n\r\n\r\ndef _plot_both(ref, target):\r\n \"\"\"Plot two images to compare them.\"\"\"\r\n plt.figure()\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(ref, cmap='viridis')\r\n plt.colorbar()\r\n plt.subplot(1, 2, 2)\r\n plt.imshow(target, cmap='viridis')\r\n plt.colorbar()\r\n # plt.show(block=False)\r\n\r\n\r\ndef _save_and_load(phantom_class, args=[]):\r\n \"\"\"Test whether the saved and loaded phantoms match.\"\"\"\r\n saved_phantom = '{}{}.txt'.format(phantom_class.__name__, args)\r\n\r\n np.random.seed(0)\r\n p0 = phantom_class(*args)\r\n\r\n if not os.path.isfile(saved_phantom):\r\n save_phantom(p0, saved_phantom)\r\n\r\n p1 = load_phantom(saved_phantom)\r\n\r\n refere = discrete_phantom(p0, 200, uniform=False)\r\n target = discrete_phantom(p1, 200, uniform=False)\r\n\r\n _plot_both(refere, target)\r\n\r\n assert_equal(target, refere,\r\n \"{}({}) changes on load.\".format(phantom_class.__name__,\r\n args))\r\n\r\n\r\ndef test_HyperbolicCocentric():\r\n _save_and_load(HyperbolicConcentric)\r\n\r\n\r\ndef test_DynamicRange():\r\n warnings.filterwarnings(\"ignore\", \"The Square*\", UserWarning)\r\n _save_and_load(DynamicRange, [10, True])\r\n _save_and_load(DynamicRange, [10, False])\r\n\r\n\r\ndef test_Soil():\r\n warnings.filterwarnings(\"ignore\", \"Reached*\", RuntimeWarning)\r\n _save_and_load(Soil)\r\n\r\n\r\n# def test_Foam():\r\n# warnings.filterwarnings(\"ignore\", \"Reached*\", RuntimeWarning)\r\n# _save_and_load(Foam)\r\n\r\n\r\ndef test_XDesignDefault():\r\n _save_and_load(XDesignDefault)\r\n p = XDesignDefault()\r\n sidebyside(p)\r\n\r\n\r\nif __name__ == '__main__':\r\n test_XDesignDefault()\r\n plt.show(block=True)\r\n" ]
[ [ "numpy.linspace", "numpy.ndarray", "numpy.cross", "numpy.roll", "numpy.full", "matplotlib.pyplot.Circle", "numpy.sin", "matplotlib.pyplot.Polygon", "numpy.zeros", "matplotlib.path.Path", "numpy.atleast_2d", "numpy.fmax", "numpy.errstate", "numpy.logical_and", "numpy.flip", "numpy.array", "numpy.sum", "numpy.fmin", "numpy.cos" ], [ "numpy.dot", "numpy.stack", "numpy.concatenate", "numpy.cross", "numpy.array" ], [ "matplotlib.pyplot.imshow", "numpy.random.seed", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ScottHull/Masters-Thesis-Code
[ "46d46eb26267feec4dcd280fa227897c12faee72" ]
[ "Thesis_Code_3/radioactivity2.py" ]
[ "import matplotlib as mpl\nmpl.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom math import log, exp\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\nplt.rcParams.update({'font.size': 16})\n\n\ndef decay(half_life, curr_nuclii, max_time, timestep, current_time, original_nuclii, rad_list=[]):\n decay_const = log(0.5) / half_life\n if current_time <= max_time:\n remaining_nuclii = curr_nuclii * exp(decay_const * timestep)\n rad_list.append((remaining_nuclii / original_nuclii))\n return decay(half_life=half_life, curr_nuclii=remaining_nuclii, max_time=max_time, timestep=timestep,\n current_time=current_time + timestep, original_nuclii=original_nuclii, rad_list=rad_list)\n else:\n return rad_list\n\ndef avg_vals_time(list_of_lists):\n avgs = []\n z_list = list(zip(*list_of_lists))\n for i in z_list:\n avg = sum(i) / len(i)\n avgs.append(avg)\n return avgs\n\n\nhf_half_life = 8.9 * 10**6\nal_half_life = 7.17 * 10**5\nfe_half_life = 3.0 * 10**5\nw_182_w_184_terrestrial = 0.864900 # Kleine & Walker 2017 Tungsten Isotopes in Planets\nw_182_w_184_terrestrial_old = 0.864680 # Kleine et al. 2002 Eucrites\nmax_time = 100 * 10**6\noriginal_hf = 100\noriginal_al = 100\noriginal_fe = 100\ntimestep = 1 * 10**6\ntime_list = [i / (1 * 10**6) for i in np.arange(0, max_time + timestep, timestep)]\nMy_5_index = time_list.index(5)\n\nhf_decay = decay(half_life=hf_half_life, curr_nuclii=original_hf, max_time=max_time, timestep=timestep,\n current_time=timestep, original_nuclii=original_hf, rad_list=[original_hf / original_hf])\nal_decay = decay(half_life=al_half_life, curr_nuclii=original_al, max_time=max_time, timestep=timestep,\n current_time=timestep, original_nuclii=original_al, rad_list=[original_al / original_al])\nfe_decay = decay(half_life=fe_half_life, curr_nuclii=original_fe, max_time=max_time, timestep=timestep,\n current_time=timestep, original_nuclii=original_fe, rad_list=[original_fe / original_fe])\n\nw_abundance = [1 - i for i in hf_decay]\n\nhf_rel_at_5My = hf_decay[My_5_index]\nal_rel_at_5My = al_decay[My_5_index]\nfe_rel_at_5My = fe_decay[My_5_index]\n\n\nprint(1 - hf_decay[My_5_index], w_abundance[My_5_index])\n\nhf_at_5 = hf_decay[My_5_index]\nal_at_5 = al_decay[My_5_index]\nfe_at_5 = fe_decay[My_5_index]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(time_list, [i * 100 for i in hf_decay], color='black', linewidth=2.0, label='$^{182}$Hf')\nax.plot(time_list, [i * 100 for i in al_decay], linewidth=2.0, label='$^{26}$Al')\nax.plot(time_list, [i * 100 for i in fe_decay], linewidth=2.0, label='$^{60}$Fe')\nax.plot(time_list, [i * 100 for i in w_abundance], color='black', linewidth=2.0, linestyle=\"--\", label=\"$^{182}$W\")\nax.axvspan(0, 5, alpha=0.2, color='red', label='Core Formation Period')\nax.set_xlabel(\"Time (Ma)\")\nax.set_ylabel(\"Relative Isotope Abundance (%)\")\nax.set_title(\"Isotope Decay over Time\")\nax.legend(loc='center right')\nminorLocator = MultipleLocator((timestep * 5) / 10**6)\nax.xaxis.set_minor_locator(minorLocator)\nax.grid()\n\neucrite_df = pd.read_excel(\"eucrites_kleine_2002.xlsx\")\n\nsample_name_list = []\nw_182_w_184_list = [] # absolute ratio over time\nw_182_w_184_eucrite_relative_list = [] # relative ratio to the final measured eucrite abundance\nhf_180_w_184_list = []\nepsilon_w_list = []\n\nMy_5_w_182_w_184 = w_abundance[My_5_index]\n\nfor row in eucrite_df.index:\n sample_name = eucrite_df['Sample'][row]\n w_182_w_184 = eucrite_df['182W/184W'][row]\n hf_180_w_184 = 
eucrite_df['180Hf/184W'][row]\n epsilon_w = eucrite_df['epsilon_W'][row]\n\n w_182_w_184_time = [i * float(w_182_w_184) for i in w_abundance]\n w_182_w_184_time_rel = [i / float(w_182_w_184) for i in w_182_w_184_time]\n epsilon_w_time = [((i / w_182_w_184_terrestrial) - 1) * (10**4) for i in w_182_w_184_time]\n\n sample_name_list.append(sample_name)\n w_182_w_184_list.append(w_182_w_184_time)\n w_182_w_184_eucrite_relative_list.append(My_5_w_182_w_184)\n epsilon_w_list.append(epsilon_w_time)\n\nw_182_w_184_list_avgs = avg_vals_time(list_of_lists=w_182_w_184_list)\n\n\nfig2 = plt.figure()\nax2 = fig2.add_subplot(111)\nax2.axvspan(0, 5, alpha=0.2, color='red')\nax2.set_xlabel(\"Time (My)\")\nax2.set_ylabel(\"Relative 182W/184W\")\nax2.set_title(\"182W/184W In Eucrites Over Time\")\nminorLocator = MultipleLocator((timestep * 5) / 10**6)\nax2.xaxis.set_minor_locator(minorLocator)\nax2.grid()\nfor index, i in enumerate(sample_name_list):\n ax2.plot(time_list, w_182_w_184_list[index], label=\"182W/184W ({})\".format(i))\n ax2.axhline(w_182_w_184_list_avgs[My_5_index], linestyle=\"--\", color='black')\n ax2.annotate(\"Avg. 182W/184W (5 My) = {}\".format(round(float(w_182_w_184_list_avgs[My_5_index]), 6)),\n (time_list[My_5_index], w_182_w_184_list_avgs[My_5_index]), xytext=(20.2, 0.42),\n arrowprops=dict(facecolor='black', shrink=0.05))\n ax2.annotate(\"Avg. 182W/184W (100 My) = {}\".format(round(float(w_182_w_184_list_avgs[-1]), 6)),\n (time_list[-1], w_182_w_184_list_avgs[-1]), xytext=(70.2, 0.62),\n arrowprops=dict(facecolor='black', shrink=0.05))\n\nax2.plot(time_list, w_182_w_184_list_avgs, label=\"Average 182W/184W\", linestyle=\"--\")\n\nax2.legend(loc='lower right')\n\nfig3 = plt.figure()\nax3 = fig3.add_subplot(111)\nax3.axvspan(0, 5, alpha=0.2, color='red')\nax3.set_xlabel(\"Time (My)\")\nax3.set_ylabel(\"Epsilon 182W\")\nax3.set_title(\"Epsilon 182W In Eucrites Over Time\")\nminorLocator = MultipleLocator((timestep * 5) / 10**6)\nax3.xaxis.set_minor_locator(minorLocator)\nax3.grid()\n\nfor index, i in enumerate(sample_name_list):\n ax3.plot(time_list, epsilon_w_list[index], label=\"Epsilon 182W ({})\".format(i))\n\nax3.legend(loc='center right')\n\nfig4 = plt.figure()\nax4 = fig4.add_subplot(111)\nax4.axvspan(0, 5, alpha=0.2, color='red')\nax4.set_xlabel(\"Time (My)\")\nax4.set_ylabel(\"Relative $^{182}$W/$^{184}$W\")\nax4.set_title(\"Relative $^{182}$W/$^{184}$W In Eucrites Over Time\")\nminorLocator = MultipleLocator((timestep * 5) / 10**6)\nax4.xaxis.set_minor_locator(minorLocator)\nax4.grid()\nfor index, i in enumerate(sample_name_list):\n ax4.plot(time_list, [j / w_182_w_184_list[index][-1] for j in w_182_w_184_list[index]], label=\"182W/184W ({})\".format(i))\n ax4.axhline(w_182_w_184_list_avgs[My_5_index] / w_182_w_184_list[index][-1], linestyle=\"--\", color='black')\n ax4.annotate(\"Avg. 182W/184W (5 My) = {}\".format(round(float(w_182_w_184_list_avgs[My_5_index] / w_182_w_184_list_avgs[-1]), 6)),\n (time_list[My_5_index], w_182_w_184_list_avgs[My_5_index] / w_182_w_184_list_avgs[-1]), xytext=(20.2, 0.42),\n arrowprops=dict(facecolor='black', shrink=0.05))\n ax4.annotate(\"Avg. 
182W/184W (100 My) = {}\".format(round(float(w_182_w_184_list_avgs[-1] / w_182_w_184_list_avgs[-1]), 6)),\n (time_list[-1], w_182_w_184_list_avgs[-1] / w_182_w_184_list_avgs[-1]), xytext=(80.2, 0.82),\n arrowprops=dict(facecolor='black', shrink=0.05))\n\nax4.plot(time_list, [j / w_182_w_184_list_avgs[-1] for j in w_182_w_184_list_avgs], label=\"Average 182W/184W\", linestyle=\"--\")\n\nax4.legend(loc='center right')\n\nprint(\"***{}\".format(w_182_w_184_list_avgs[My_5_index]))\n\n# fig4 = plt.figure()\n# ax4 = fig4.add_subplot(111)\n# ax4.axvspan(0, 5, alpha=0.2, color='red')\n# ax4.set_xlabel(\"Time (My)\")\n# ax4.set_ylabel(\"Relative 182W/184W\")\n# ax4.set_title(\"182W/184W In Eucrites Over Time\")\n# minorLocator = MultipleLocator((timestep * 5) / 10**6)\n# ax4.xaxis.set_minor_locator(minorLocator)\n# ax4.grid()\n#\n# for index, i in enumerate(sample_name_list):\n# ax4.plot(time_list, w_182_w_184_eucrite_relative_list[index], label=\"182W/184W ({})\".format(i))\n#\n# ax4.legend(loc='center right')\n\nplt.show()\n\n" ]
[ [ "matplotlib.ticker.MultipleLocator", "pandas.read_excel", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ejgenc/bachelors_paper_proof_of_concept
[ "c00dc7b59d5e3295df89d080e7d49792c15fa760" ]
[ "tests/data_quality_tests/test_review_sentences_cleaned_data_quality.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n------ What is this file? ------\n\nThis test module contains some data quality tests for the review_sentences_cleaned.csv file.\nThe file can be found at:\n ../../data/cleaned/review_sentences_cleaned.csv\n\n\"\"\"\n#%% --- Import required packages ---\n\nimport os\nfrom pathlib import Path # To wrap around filepaths\nimport pandas as pd\n\n#%% --- Set proper directory to assure integration with doit ---\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\n#%% --- Import data ---\n\nimport_fp = Path(\"../../data/cleaned/review_sentences_cleaned.csv\")\ntest_target = pd.read_csv(import_fp)\n\n#%% --- Quality test: check if there are any null values ---\n\nclass TestNullValues(object):\n def test_total_null_values(self):\n expected = 0\n actual = test_target.isnull().sum().sum()\n error_message = \"Dataset contains null values. Expected {} null values, got {}\".format(expected,actual)\n assert expected == actual, error_message\n \n#%% --- Quality test: check data type agreements within columns and data types ---\n\nclass TestDataTypes(object):\n def test_data_type_agreement_within_columns(self):\n for column_name in test_target.columns:\n expected_dtype = type(test_target[column_name][0])\n value_index = 0\n while value_index < len(test_target[column_name]):\n value_type = type(test_target[column_name][value_index])\n error_message = \"Values in column \\\"{}\\\" are not all of same type. Value at index {} is type {}, expected type {}\".format(column_name, value_index, value_type, expected_dtype)\n assert value_type == expected_dtype, error_message\n value_index += 1\n \n def test_if_selected_columns_are_of_correct_dtype(self):\n dtype_dict = {\n \"book_id\": \"object\",\n \"review_id\": \"object\",\n \"sentence_id\": \"object\",\n \"sent_mentions_original\": \"bool\",\n \"sent_mentions_trans\": \"bool\",\n \"review_sentence\": \"object\"}\n for column, dtype in dtype_dict.items():\n expected = dtype\n actual = str(test_target[column].dtype)\n error_message = \"Column {} is of wrong data type. Expected {}, got {}\".format(column, expected, actual)\n assert expected == actual, error_message\n\n#%% --- Quality test: check if each review_sentence is at least 3 words long.\n\nclass TestLength(object):\n def test_review_sentence_length(self):\n threshold = 3\n actual = len(test_target[\"review_sentence\"].str.split())\n error_message = \"Expected all review sentences to be equal to or longer than {} words. Got a review sentence that is {} word(s) long.\".format(threshold,actual)\n assert actual >= threshold, error_message" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
raiv-toulouse/RPLidar
[ "f372e7f874322300aded144c8aeb7a283a426918" ]
[ "examplePyqtgraph.py" ]
[ "#!/usr/bin/env python\n\nfrom PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\n\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(100) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.onNewData)\n\n self.plotItem = self.addPlot(title=\"Lidar points\")\n\n self.plotDataItem = self.plotItem.plot([], pen=None,\n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n\n\n def setData(self, x, y):\n self.plotDataItem.setData(x, y)\n\n\n def onNewData(self):\n numPoints = 1000\n x = np.random.normal(size=numPoints)\n y = np.random.normal(size=numPoints)\n self.setData(x, y)\n\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = MyWidget()\n win.show()\n win.resize(800,600)\n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
knowledgetechnologyuhh/lannro-gym
[ "ac3afcf7d8ed854d75368135b023edf055644dd2" ]
[ "main.py" ]
[ "import gym\nimport os\nimport numpy as np\nimport lanro\nimport argparse\nimport glfw\n\nDEBUG = int(\"DEBUG\" in os.environ and os.environ[\"DEBUG\"])\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--interactive', action='store_true', dest='interactive', help='Start interactive mode')\n parser.add_argument('-t', '--test', action='store_true', dest='test', help='Start test mode')\n parser.add_argument('-r', '--reward', action='store_true', dest='reward', help='Print the reward.')\n parser.add_argument('-a', '--action', action='store_true', dest='action', help='Print the action.')\n parser.add_argument('--full', action='store_true', dest='full', help='Print everything')\n parser.add_argument('--keyboard',\n action='store_true',\n dest='keyboard_control',\n help='Activates keyboard control for interactive mode.')\n parser.add_argument('--metrics', action='store_true', help='Print environment metrics.')\n parser.add_argument('--action_type', type=str, default='absolute_joints', help='Action type to control the robot.')\n parser.add_argument(\n '-e',\n '--env',\n default='PandaNLReach2-v0',\n help=\n f\"Available envs: {', '.join([envspec.id for envspec in gym.envs.registry.all() if 'Panda' in envspec.id or 'UR5' in envspec.id])}\"\n )\n return parser.parse_args()\n\n\ndef log_step(env, action, args):\n obs, reward, done, info = env.step(action)\n if args.reward:\n print(f\"reward: {reward} success: {info['is_success']}\")\n if args.action:\n print(action)\n if args.full:\n print(obs, reward, done, info)\n if args.metrics:\n print(env.get_metrics())\n if DEBUG and info['is_success'] or 'hindsight_instruction' in info.keys():\n import ipdb\n ipdb.set_trace()\n return done or info['is_success']\n\n\ndef test(env, args):\n for _ in range(100):\n env.reset()\n done = False\n while not done:\n action = env.action_space.sample()\n done = log_step(env, action, args)\n env.render(mode=\"human\")\n\n\nkey_events = {\n 65297: \"forward\",\n 65298: \"backward\",\n 65295: \"straight_left\",\n 65296: \"straight_right\",\n glfw.KEY_MINUS: \"close_gripper\",\n glfw.KEY_5: \"open_gripper\",\n 43: \"open_gripper\",\n glfw.KEY_8: \"up\",\n glfw.KEY_2: \"down\",\n glfw.KEY_1: \"yaw_left\",\n glfw.KEY_3: \"yaw_right\",\n glfw.KEY_6: \"pitch_right\",\n glfw.KEY_4: \"pitch_left\",\n glfw.KEY_7: \"roll_left\",\n glfw.KEY_9: \"roll_right\",\n}\n\n\ndef interactive(args):\n env = gym.make(args.env, render=True, action_type=args.action_type)\n\n if not args.keyboard_control:\n controls = env.robot.get_xyz_rpy_controls()\n\n for _ in range(10):\n env.reset()\n done = False\n action = np.zeros(shape=env.action_space.shape)\n key_control_gain = 0.01\n for idx, val in enumerate(env.robot.get_default_controls().values()):\n if len(action) > idx:\n action[idx] = val\n\n while True:\n if args.keyboard_control:\n keys = env.getKeyboardEvents()\n if keys:\n key_str = ''.join(\n [key_events[_pressed] for _pressed in keys.keys() if _pressed in key_events.keys()])\n if \"forward\" in key_str:\n action[3] += 1 * key_control_gain\n if \"backward\" in key_str:\n action[3] += -1 * key_control_gain\n if \"straight_left\" in key_str:\n action[0] += 1 * key_control_gain\n if \"straight_right\" in key_str:\n action[0] += -1 * key_control_gain\n if \"up\" in key_str:\n action[1] += -1 * key_control_gain\n if \"down\" in key_str:\n action[1] += 1 * key_control_gain\n if not env.robot.fixed_gripper:\n if \"close_gripper\" in key_str:\n action[-1] += 1 * key_control_gain\n if \"open_gripper\" in 
key_str:\n action[-1] += -1 * key_control_gain\n if env.action_space.shape[0] > 4:\n if \"roll_left\" in key_str:\n action[2] += 1 * key_control_gain\n if \"roll_right\" in key_str:\n action[2] += -1 * key_control_gain\n if \"pitch_left\" in key_str:\n action[4] += 1 * key_control_gain\n if \"pitch_right\" in key_str:\n action[4] += -1 * key_control_gain\n if \"yaw_left\" in key_str:\n action[5] += -1 * key_control_gain\n if \"yaw_right\" in key_str:\n action[5] += 1 * key_control_gain\n else:\n action = np.zeros(shape=env.action_space.shape)\n for idx, ctrl_id in enumerate(controls):\n try:\n action[idx] = env.sim.bclient.readUserDebugParameter(ctrl_id)\n except Exception as e:\n print(e)\n continue\n\n done = log_step(env, np.array(action), args)\n env.render(mode='human')\n if args.metrics and done:\n break\n\n\ndef main():\n args = parse_args()\n if args.test:\n env = gym.make(args.env, render=True)\n env.reset()\n test(env, args)\n env.close()\n elif args.interactive:\n interactive(args)\n else:\n raise ValueError(\"No valid mode found: use -t/--test (test mode) or -i/--interactive (interactive mode)\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
teamtact/tact-developer-similarity
[ "2282022fd84f8f98ca29854c3061a36cc643b5ab" ]
[ "org/tact/dev-similarity/criteria-5/euc_developers_5_criteria.py" ]
[ "# !/usr/bin/env python\n# \n#\n# Author : \n# Date: May 19 2018\n# About: \nfrom unittest.mock import inplace\n\n'''\n\nhttps://www.dataquest.io/blog/k-nearest-neighbors-in-python/\nhttps://stackoverflow.com/questions/29530232/python-pandas-check-if-any-value-is-nan-in-dataframe\nhttps://stackoverflow.com/questions/40393663/how-do-i-define-a-dataframe-in-python\nhttps://stackoverflow.com/questions/18689823/pandas-dataframe-replace-nan-values-with-average-of-columns\nhttps://stackoverflow.com/questions/31323499/sklearn-error-valueerror-input-contains-nan-infinity-or-a-value-too-large-for\n\nTact-id:836\n\n'''\n\nimport pandas as pd\nimport math\nimport sys\nimport numpy as np\nfrom scipy.spatial import distance\n\n\n\n\"\"\"\n This method will find the euclidean distance\n\"\"\"\n'''\ndef euclidean_distance(row):\n \"\"\"\n A simple euclidean distance function\n \"\"\"\n \n inner_value = 0\n for k in columns:\n inner_value += (row[k] - selected_player[k]) ** 2\n return math.sqrt(inner_value)\n'''\n\n\n\"\"\"\n This method will find the top similar developers\n max: range\n \n\"\"\"\ndef find_top_similar_entitties(max, nba, distance_frame, primary_column):\n \n for i in range(max):\n \n current_farthest = distance_frame.iloc[i][\"idx\"]\n #print('closest player index: '+str(int(current_farthest)))\n close_to_the_top_founder = nba.loc[int(current_farthest)][primary_column]\n\n current_distance = distance_frame.iloc[i][\"dist\"]\n percentile = 100 - (100 / 18.9714833602) * current_distance\n \n current_distance = round(current_distance, 2)\n \n if percentile <0:\n percentile = 0 \n \n percentile = round(percentile, 2) \n\n print('similar '+str(i)+' : '+str(close_to_the_top_founder) + ' - distance : '+str(current_distance) + \", Percentile : \"+ (str(percentile)))\n \n \n\n\"\"\"\n This method will find the similar developers using KNN\n\"\"\"\ndef find_similar_developers(filepath, columns, primary_column, given_entity_name):\n \n with open(filepath, 'r') as csvfile:\n dev_df = pd.read_csv(csvfile)\n\n #print the column name\n #print(dev_df.columns.values)\n \n # apply mean on NaN vlaues\n dev_df.fillna(round(dev_df.mean()), inplace=True)\n \n #print(dev_df) \n \n # remove duplicate index (https://stackoverflow.com/questions/27236275/what-does-valueerror-cannot-reindex-from-a-duplicate-axis-mean)\n #dev_df = dev_df[~dev_df.index.duplicated()] \n \n\n selected_player = dev_df[dev_df[primary_column] == given_entity_name].iloc[0] \n\n nba_numeric = dev_df[columns] #it select the only numeric column\n #print(nba_numeric)\n \n #print('mean : \\n'+str(nba_numeric.mean()))\n \n #abc = nba_numeric - nba_numeric.mean()\n #return\n\n\n # the normalization calculation\n nba_normalized = (nba_numeric - nba_numeric.mean()) / nba_numeric.std()\n #print(nba_normalized) #print the value\n\n # Not sure whether it should be mean or zero (need to verify with ore sources)\n #nba_normalized.fillna(0, inplace=True)\n nba_normalized.fillna(round(nba_normalized.mean()), inplace=True)\n\n top_founder_normalized = nba_normalized[dev_df[primary_column] == given_entity_name]\n #print(top_founder_normalized)\n\n euclidean_distances = nba_normalized.apply(lambda row: distance.euclidean(row, top_founder_normalized), axis=1)\n #print(euclidean_distances)\n #return\n\n distance_frame = pd.DataFrame(data={\"dist\": euclidean_distances, \"idx\": euclidean_distances.index})\n\n distance_frame.sort_values(by=[\"dist\"], inplace=True)\n\n second_smallest = distance_frame.iloc[1][\"idx\"]\n most_nearer_entity = 
dev_df.loc[int(second_smallest)][primary_column]\n \n print('Direct similarity : '+most_nearer_entity)\n\n print('Top 10 Similar developer to '+given_entity_name)\n find_top_similar_entitties(10, dev_df, distance_frame, primary_column) \n \n print('\\nTop 5 developers Sorted')\n find_top_similar_entitties(5, dev_df, distance_frame, primary_column)\n\ndef test_dummy():\n filepath = \"developer_score_5_criteria.csv\"\n \n columns = [\n 'LinkedIn content',\n 'Public coding activities',\n 'Github Analysis',\n 'Stackoverflow Analysis', \n 'Tech Keys involved'\n ]\n \n primary_column = \"Name\"\n top_developer_name = 'https://www.linkedin.com/in/geekopedia/'\n \n find_similar_developers(filepath, columns, primary_column, top_developer_name)\n\nif __name__ == '__main__':\n test_dummy()\n \n \n'''\n Variables: \n LinkedIn content\n Public coding activities\n Github Analysis\n Stackoverflow Analysis \n Tech Keys involved \n \n'''" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "scipy.spatial.distance.euclidean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
201528014227051/ARNet
[ "e7779d6af1a8990712d8e8e4a72e4c1ed138f60e" ]
[ "image_captioning/prepro_build_vocab.py" ]
[ "import os\nimport re\nimport numpy as np\nfrom six.moves import cPickle\nimport time\nimport opts\n\n\n# -----------------------------------------------------------------\n# Borrowed this function from NeuralTalk:\n# https://github.com/karpathy/neuraltalk/blob/master/driver.py#L16\n# -----------------------------------------------------------------\ndef preProBuildWordVocab(sentence_iterator, word_count_threshold=5):\n print('Preprocessing word counts and creating vocab based on word count threshold %d' % word_count_threshold)\n t0 = time.time()\n\n word_counts = {}\n nsents = 0\n for sent in sentence_iterator:\n nsents += 1\n sent = sent.lower()\n sent = sent.replace(',', ' ,')\n sent = sent.replace('\\n', '').replace('\"', '')\n sent = sent.replace('.', '').replace('?', '').replace('!', '')\n sent = sent.replace('``', '').replace('`', '').replace(\"''\", '')\n sent = sent.replace(':', '').replace('-', '').replace('--', '')\n sent = sent.replace('...', '').replace(';', '')\n sent = sent.replace('(', '').replace(')', '').replace('[', '').replace(']', '')\n sent = sent.replace('@', '').replace('#', '').replace('$', '').replace('&', '').replace('*', '')\n sent = sent.replace('\\\\', '').replace('/', '')\n sent = sent.replace('1', '').replace('2', '').replace('3', '').replace('4', '').replace('5', '')\n sent = sent.replace('6', '').replace('7', '').replace('8', '').replace('9', '').replace('10', '')\n\n sent = 'BOS ' + sent + ' EOS'\n sent = re.sub('\\s+', ' ', sent).strip()\n tmp_sent = sent.split(' ')\n\n if ' ' in tmp_sent: tmp_sent.remove(' ')\n if '\\n' in tmp_sent: tmp_sent.remove('\\n')\n if '\"' in tmp_sent: tmp_sent.remove('\"')\n\n for w in tmp_sent:\n word_counts[w] = word_counts.get(w, 0) + 1\n\n # calculate the number of word, UNK\n unk_count = 0\n for w, c in word_counts.items():\n if c < word_count_threshold:\n unk_count = unk_count + c\n word_counts['UNK'] = unk_count\n\n # filter the word less than the threshold\n vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]\n print('Filter words from %d to %d in %0.2fs' % (len(word_counts), len(vocab), time.time()-t0))\n\n ixtoword = {}\n wordtoix = {}\n for idx, w in enumerate(vocab):\n wordtoix[w] = idx+1\n ixtoword[idx+1] = w\n\n bias_init_vector = np.array([1.0 * word_counts[ixtoword[i]] for i in ixtoword])\n bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies\n bias_init_vector = np.log(bias_init_vector)\n bias_init_vector -= np.max(bias_init_vector) # shift to nice numeric range\n\n return wordtoix, ixtoword, bias_init_vector\n\n\n# -------------------------------------------\n# generate mapping between words and indices\n# -------------------------------------------\ndef generate_train_index(images_captions):\n print(\"change the word of each image captions to index by word_to_idx ...\")\n count = 0\n train_images_captions_index = {}\n\n for each_img, sents in images_captions.items():\n sents_index = np.zeros([len(sents), opt.lstm_step], dtype=np.int32)\n for idy, sent in enumerate(sents):\n sent = sent.lower()\n sent = sent.replace(',', ' ,')\n sent = sent.replace('\\n', '').replace('\"', '')\n sent = sent.replace('.', '').replace('?', '').replace('!', '')\n sent = sent.replace('``', '').replace('`', '').replace(\"''\", '')\n sent = sent.replace(':', '').replace('-', '').replace('--', '')\n sent = sent.replace('...', '').replace(';', '')\n sent = sent.replace('(', '').replace(')', '').replace('[', '').replace(']', '')\n sent = sent.replace('@', '').replace('#', '').replace('$', 
'').replace('&', '').replace('*', '')\n sent = sent.replace('\\\\', '').replace('/', '')\n sent = sent.replace('1', '').replace('2', '').replace('3', '').replace('4', '').replace('5', '')\n sent = sent.replace('6', '').replace('7', '').replace('8', '').replace('9', '').replace('0', '')\n\n sent = 'BOS ' + sent + ' EOS'\n sent = re.sub('\\s+', ' ', sent).strip()\n tmp_sent = sent.split(' ')\n\n if ' ' in tmp_sent: tmp_sent.remove(' ')\n if '\\n' in tmp_sent: tmp_sent.remove('\\n')\n if '\"' in tmp_sent: tmp_sent.remove('\"')\n\n for idx, word in enumerate(tmp_sent):\n if idx == opt.lstm_step-1:\n sents_index[idy, idx] = word_to_idx['EOS']\n break\n if word in word_to_idx:\n sents_index[idy, idx] = word_to_idx[word]\n if word not in word_to_idx:\n sents_index[idy, idx] = word_to_idx[\"UNK\"]\n\n train_images_captions_index[each_img] = sents_index\n count += 1\n print(\"{} {} {}\".format(count, each_img, len(sents)))\n return train_images_captions_index\n\n\nif __name__ == \"__main__\":\n opt = opts.parse_opt()\n\n with open(opt.official_train_captions_path, 'r') as train_fr:\n train_images_captions = cPickle.load(train_fr)\n\n with open(opt.official_val_captions_path, 'r') as val_fr:\n val_images_captions = cPickle.load(val_fr)\n\n # combine all sentences in captions\n all_sents = []\n for image, sents in train_images_captions.items():\n for each_sent in sents:\n all_sents.append(each_sent)\n\n for image, sents in val_images_captions.items():\n for each_sent in sents:\n all_sents.append(each_sent)\n\n word_to_idx, idx_to_word, bias_init_vector = preProBuildWordVocab(all_sents, word_count_threshold=5)\n\n images_captions = dict(train_images_captions, **val_images_captions)\n train_images_captions_index = generate_train_index(images_captions)\n\n # save\n with open(opt.idx_to_word_path, 'w') as fw:\n cPickle.dump(idx_to_word, fw)\n\n with open(opt.word_to_idx_path, 'w') as fw:\n cPickle.dump(word_to_idx, fw)\n\n np.save(opt.bias_init_vector_path, bias_init_vector)\n\n with open(opt.train_images_captions_index, 'w') as f:\n cPickle.dump(train_images_captions_index, f)\n" ]
[ [ "numpy.log", "numpy.save", "numpy.max", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kagemeka/python
[ "486ce39d97360b61029527bacf00a87fdbcf552c", "486ce39d97360b61029527bacf00a87fdbcf552c", "486ce39d97360b61029527bacf00a87fdbcf552c", "486ce39d97360b61029527bacf00a87fdbcf552c", "486ce39d97360b61029527bacf00a87fdbcf552c" ]
[ "src/kgmk/ds/img/pose_estimation/posenet/model.py", "src/kgmk/dsa/misc/compress_array/jit.py", "src/kgmk/dsa/topology/maximum_flow/dinic/matrix/non_recursive/jit.py", "src/kgmk/dsa/math/fft/cooley_turkey/jit.py", "tests/prime_number/sieve_of_eratosthenes/np.py" ]
[ "import tensorflow as tf\nprint(tf.__version__)\nimport os\nfrom . import converter\n\n\n\nDEBUG_OUTPUT = False\n\n\ndef model_id_to_ord(model_id):\n if 0 <= model_id < 4:\n return model_id # id is already ordinal\n elif model_id == 50:\n return 0\n elif model_id == 75:\n return 1\n elif model_id == 100:\n return 2\n else: # 101\n return 3\n\n\ndef load_config(model_ord):\n converter_cfg = converter.config.load_config()\n checkpoints = converter_cfg['checkpoints']\n output_stride = converter_cfg['outputStride']\n checkpoint_name = checkpoints[model_ord]\n\n model_cfg = {\n 'output_stride': output_stride,\n 'checkpoint_name': checkpoint_name,\n }\n return model_cfg\n\n\ndef load_model(model_id, sess, model_dir):\n model_ord = model_id_to_ord(model_id)\n model_cfg = load_config(model_ord)\n model_path = os.path.join(model_dir, 'model-%s.pb' % model_cfg['checkpoint_name'])\n if not os.path.exists(model_path):\n print('Cannot find model file %s, converting from tfjs...' % model_path)\n from .converter.tfjs2python import convert\n convert(model_ord, model_dir, check=False)\n assert os.path.exists(model_path)\n\n with tf.io.gfile.GFile(model_path, 'rb') as f:\n graph_def = tf.compat.v1.GraphDef()\n\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n\n if DEBUG_OUTPUT:\n graph_nodes = [n for n in graph_def.node]\n names = []\n for t in graph_nodes:\n names.append(t.name)\n print('Loaded graph node:', t.name)\n\n offsets = sess.graph.get_tensor_by_name('offset_2:0')\n displacement_fwd = sess.graph.get_tensor_by_name('displacement_fwd_2:0')\n displacement_bwd = sess.graph.get_tensor_by_name('displacement_bwd_2:0')\n heatmaps = sess.graph.get_tensor_by_name('heatmap:0')\n\n return model_cfg, [heatmaps, offsets, displacement_fwd, displacement_bwd]\n", "import typing \nimport numpy as np \nimport numba as nb \n\n\n\[email protected] \ndef compress_array(\n a: np.ndarray,\n) -> typing.Tuple[(np.ndarray, ) * 2]:\n v = np.unique(a)\n i = np.searchsorted(v, a)\n return i, v\n", "import numpy as np \nimport numba as nb \n\n\[email protected] \ndef maximum_flow_dinic(\n g: np.ndarray,\n src: int,\n sink: int,\n) -> int:\n n = len(g)\n g = g.copy()\n inf = 1 << 60\n level = np.full(n, -1, np.int64)\n \n def update_level():\n level[:] = -1\n level[src] = 0\n fifo_que = [src]\n for u in fifo_que:\n for v in range(n):\n if level[v] != -1 or g[u, v] <= 0: continue\n level[v] = level[u] + 1\n fifo_que.append(v)\n\n flow_in = np.zeros(n, np.int64)\n flow_out = np.zeros(n, np.int64)\n prev = np.full(n, -1, np.int64)\n\n def compute_flow():\n flow_in[:] = 0\n flow_in[src] = inf\n flow_out[:] = 0\n prev[:] = -1\n st = [src]\n while st:\n u = st.pop()\n if u < 0:\n u = ~u\n if u == src: return flow_out[src]\n p = prev[u]\n f = flow_out[u]\n flow_out[p] += f\n flow_in[p] -= f\n g[p, u] -= f\n g[u, p] += f\n flow_in[u] = flow_out[u] = 0\n continue\n st.append(~u)\n p = prev[u]\n if u != src:\n flow_in[u] = min(flow_in[p], g[p, u])\n if u == sink:\n flow_out[u] = flow_in[u]\n continue\n if flow_in[u] == 0: continue\n for v in range(n - 1, -1, -1):\n if g[u, v] == 0 or level[v] <= level[u]: continue\n prev[v] = u\n st.append(v)\n\n flow = 0\n while 1:\n update_level()\n if level[sink] == -1: return flow\n flow += compute_flow()", "import numpy as np\nimport numba as nb\n\n\n\[email protected]((nb.c16[:], nb.optional(nb.b1)))\ndef fft(\n a: np.ndarray,\n inverse: bool=False\n) -> np.ndarray:\n n = a.size\n h = 1\n while 1 << h < n: h += 1\n assert 1 << h == n\n \n 
def _reverse_bits():\n idx = np.empty(n, dtype=np.int64)\n for i in range(n):\n j = 0\n for k in range(h):\n j |= (i >> k & 1) << (h - 1 - k)\n idx[i] = j\n nonlocal a\n a = a[idx]\n \n def _butterfly():\n sign = -1 + 2 * inverse\n b = 1\n while b < n:\n for j in range(b):\n w = np.exp(sign * np.pi / b * j * 1j)\n for k in range(0, n, 2 * b):\n s, t = a[k + j], a[k + j + b] * w\n a[k + j], a[k + j + b] = s + t, s - t \n b <<= 1\n \n _reverse_bits()\n _butterfly()\n if inverse: a /= n\n return a", "from kgmk.dsa.number_theory.sieve_of_eratosthenes.np import (\n SieveOfEratosthenes,\n)\nimport numpy as np\n\n\n\ndef test():\n fn = SieveOfEratosthenes()\n a = fn(1000000)\n print(a)\n print(np.flatnonzero(a))\n a = fn.gpf(10000)\n print(a)\n a = fn.lpf(10000)\n print(a)\n\n\nif __name__ == '__main__':\n test()" ]
[ [ "tensorflow.compat.v1.GraphDef", "tensorflow.io.gfile.GFile", "tensorflow.import_graph_def" ], [ "numpy.searchsorted", "numpy.unique" ], [ "numpy.zeros", "numpy.full" ], [ "numpy.exp", "numpy.empty" ], [ "numpy.flatnonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Loonride/deeplens-cv
[ "9e5b31c1a269d364e4912ba8266415fa04277e11", "9e5b31c1a269d364e4912ba8266415fa04277e11", "9e5b31c1a269d364e4912ba8266415fa04277e11", "9e5b31c1a269d364e4912ba8266415fa04277e11" ]
[ "dlcv/object_detection/tensorflow_detect/builders/matcher_builder_test.py", "dlcv/object_detection/tensorflow_detect/core/region_similarity_calculator.py", "dlcv/object_detection/tensorflow_detect/utils/per_image_vrd_evaluation.py", "dlcv/object_detection/tensorflow_detect/utils/variables_helper_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for matcher_builder.\"\"\"\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.tensorflow_detect.builders import matcher_builder\nfrom object_detection.tensorflow_detect.matchers import argmax_matcher\nfrom object_detection.tensorflow_detect.matchers import bipartite_matcher\nfrom object_detection.tensorflow_detect.protos import matcher_pb2\n\n\nclass MatcherBuilderTest(tf.test.TestCase):\n\n def test_build_arg_max_matcher_with_defaults(self):\n matcher_text_proto = \"\"\"\n argmax_matcher {\n }\n \"\"\"\n matcher_proto = matcher_pb2.Matcher()\n text_format.Merge(matcher_text_proto, matcher_proto)\n matcher_object = matcher_builder.build(matcher_proto)\n self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))\n self.assertAlmostEqual(matcher_object._matched_threshold, 0.5)\n self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5)\n self.assertTrue(matcher_object._negatives_lower_than_unmatched)\n self.assertFalse(matcher_object._force_match_for_each_row)\n\n def test_build_arg_max_matcher_without_thresholds(self):\n matcher_text_proto = \"\"\"\n argmax_matcher {\n ignore_thresholds: true\n }\n \"\"\"\n matcher_proto = matcher_pb2.Matcher()\n text_format.Merge(matcher_text_proto, matcher_proto)\n matcher_object = matcher_builder.build(matcher_proto)\n self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))\n self.assertEqual(matcher_object._matched_threshold, None)\n self.assertEqual(matcher_object._unmatched_threshold, None)\n self.assertTrue(matcher_object._negatives_lower_than_unmatched)\n self.assertFalse(matcher_object._force_match_for_each_row)\n\n def test_build_arg_max_matcher_with_non_default_parameters(self):\n matcher_text_proto = \"\"\"\n argmax_matcher {\n matched_threshold: 0.7\n unmatched_threshold: 0.3\n negatives_lower_than_unmatched: false\n force_match_for_each_row: true\n use_matmul_gather: true\n }\n \"\"\"\n matcher_proto = matcher_pb2.Matcher()\n text_format.Merge(matcher_text_proto, matcher_proto)\n matcher_object = matcher_builder.build(matcher_proto)\n self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher))\n self.assertAlmostEqual(matcher_object._matched_threshold, 0.7)\n self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3)\n self.assertFalse(matcher_object._negatives_lower_than_unmatched)\n self.assertTrue(matcher_object._force_match_for_each_row)\n self.assertTrue(matcher_object._use_matmul_gather)\n\n def test_build_bipartite_matcher(self):\n matcher_text_proto = \"\"\"\n bipartite_matcher {\n }\n \"\"\"\n matcher_proto = matcher_pb2.Matcher()\n text_format.Merge(matcher_text_proto, matcher_proto)\n matcher_object = matcher_builder.build(matcher_proto)\n self.assertTrue(\n isinstance(matcher_object, 
bipartite_matcher.GreedyBipartiteMatcher))\n\n def test_raise_error_on_empty_matcher(self):\n matcher_text_proto = \"\"\"\n \"\"\"\n matcher_proto = matcher_pb2.Matcher()\n text_format.Merge(matcher_text_proto, matcher_proto)\n with self.assertRaises(ValueError):\n matcher_builder.build(matcher_proto)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Region Similarity Calculators for BoxLists.\n\nRegion Similarity Calculators compare a pairwise measure of similarity\nbetween the boxes in two BoxLists.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport tensorflow as tf\n\nfrom object_detection.tensorflow_detect.core import standard_fields as fields, \\\n box_list_ops\n\n\nclass RegionSimilarityCalculator(object):\n \"\"\"Abstract base class for region similarity calculator.\"\"\"\n __metaclass__ = ABCMeta\n\n def compare(self, boxlist1, boxlist2, scope=None):\n \"\"\"Computes matrix of pairwise similarity between BoxLists.\n\n This op (to be overridden) computes a measure of pairwise similarity between\n the boxes in the given BoxLists. Higher values indicate more similarity.\n\n Note that this method simply measures similarity and does not explicitly\n perform a matching.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n scope: Op scope name. 
Defaults to 'Compare' if None.\n\n Returns:\n a (float32) tensor of shape [N, M] with pairwise similarity score.\n \"\"\"\n with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:\n return self._compare(boxlist1, boxlist2)\n\n @abstractmethod\n def _compare(self, boxlist1, boxlist2):\n pass\n\n\nclass IouSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on Intersection over Union (IOU) metric.\n\n This class computes pairwise similarity between two BoxLists based on IOU.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOU similarity between the two BoxLists.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing pairwise iou scores.\n \"\"\"\n return box_list_ops.iou(boxlist1, boxlist2)\n\n\nclass NegSqDistSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on the squared distance metric.\n\n This class computes pairwise similarity between two BoxLists based on the\n negative squared distance metric.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute matrix of (negated) sq distances.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing negated pairwise squared distance.\n \"\"\"\n return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)\n\n\nclass IoaSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on Intersection over Area (IOA) metric.\n\n This class computes pairwise similarity between two BoxLists based on their\n pairwise intersections divided by the areas of second BoxLists.\n \"\"\"\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOA similarity between the two BoxLists.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing pairwise IOA scores.\n \"\"\"\n return box_list_ops.ioa(boxlist1, boxlist2)\n\n\nclass ThresholdedIouSimilarity(RegionSimilarityCalculator):\n \"\"\"Class to compute similarity based on thresholded IOU and score.\n\n This class computes pairwise similarity between two BoxLists based on IOU and\n a 'score' present in boxlist1. If IOU > threshold, then the entry in the\n output pairwise tensor will contain `score`, otherwise 0.\n \"\"\"\n\n def __init__(self, iou_threshold=0):\n \"\"\"Initialize the ThresholdedIouSimilarity.\n\n Args:\n iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,\n then the comparison result will be the foreground probability of\n the first box, otherwise it will be zero.\n \"\"\"\n self._iou_threshold = iou_threshold\n\n def _compare(self, boxlist1, boxlist2):\n \"\"\"Compute pairwise IOU similarity between the two BoxLists and score.\n\n Args:\n boxlist1: BoxList holding N boxes. Must have a score field.\n boxlist2: BoxList holding M boxes.\n\n Returns:\n A tensor with shape [N, M] representing scores threholded by pairwise\n iou scores.\n \"\"\"\n ious = box_list_ops.iou(boxlist1, boxlist2)\n scores = boxlist1.get_field(fields.BoxListFields.scores)\n scores = tf.expand_dims(scores, axis=1)\n row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]])\n thresholded_ious = tf.where(ious > self._iou_threshold,\n row_replicated_scores, tf.zeros_like(ious))\n\n return thresholded_ious\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluates Visual Relations Detection(VRD) result evaluation on an image.\n\nAnnotate each VRD result as true positives or false positive according to\na predefined IOU ratio. Multi-class detection is supported by default.\nBased on the settings, per image evaluation is performed either on phrase\ndetection subtask or on relation detection subtask.\n\"\"\"\nimport numpy as np\n\nfrom object_detection.tensorflow_detect.utils import np_box_list\nfrom object_detection.tensorflow_detect.utils import np_box_list_ops\n\n\nclass PerImageVRDEvaluation(object):\n \"\"\"Evaluate vrd result of a single image.\"\"\"\n\n def __init__(self, matching_iou_threshold=0.5):\n \"\"\"Initialized PerImageVRDEvaluation by evaluation parameters.\n\n Args:\n matching_iou_threshold: A ratio of area intersection to union, which is\n the threshold to consider whether a detection is true positive or not;\n in phrase detection subtask.\n \"\"\"\n self.matching_iou_threshold = matching_iou_threshold\n\n def compute_detection_tp_fp(self, detected_box_tuples, detected_scores,\n detected_class_tuples, groundtruth_box_tuples,\n groundtruth_class_tuples):\n \"\"\"Evaluates VRD as being tp, fp from a single image.\n\n Args:\n detected_box_tuples: A numpy array of structures with shape [N,],\n representing N tuples, each tuple containing the same number of named\n bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max].\n detected_scores: A float numpy array of shape [N,], representing\n the confidence scores of the detected N object instances.\n detected_class_tuples: A numpy array of structures shape [N,],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n groundtruth_box_tuples: A float numpy array of structures with the shape\n [M,], representing M tuples, each tuple containing the same number\n of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max].\n groundtruth_class_tuples: A numpy array of structures shape [M,],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n\n Returns:\n scores: A single numpy array with shape [N,], representing N scores\n detected with object class, sorted in descentent order.\n tp_fp_labels: A single boolean numpy array of shape [N,], representing N\n True/False positive label, one label per tuple. 
The labels are sorted\n so that the order of the labels matches the order of the scores.\n result_mapping: A numpy array with shape [N,] with original index of each\n entry.\n \"\"\"\n\n scores, tp_fp_labels, result_mapping = self._compute_tp_fp(\n detected_box_tuples=detected_box_tuples,\n detected_scores=detected_scores,\n detected_class_tuples=detected_class_tuples,\n groundtruth_box_tuples=groundtruth_box_tuples,\n groundtruth_class_tuples=groundtruth_class_tuples)\n\n return scores, tp_fp_labels, result_mapping\n\n def _compute_tp_fp(self, detected_box_tuples, detected_scores,\n detected_class_tuples, groundtruth_box_tuples,\n groundtruth_class_tuples):\n \"\"\"Labels as true/false positives detection tuples across all classes.\n\n Args:\n detected_box_tuples: A numpy array of structures with shape [N,],\n representing N tuples, each tuple containing the same number of named\n bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n detected_scores: A float numpy array of shape [N,], representing\n the confidence scores of the detected N object instances.\n detected_class_tuples: A numpy array of structures shape [N,],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n groundtruth_box_tuples: A float numpy array of structures with the shape\n [M,], representing M tuples, each tuple containing the same number\n of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n groundtruth_class_tuples: A numpy array of structures shape [M,],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n\n Returns:\n scores: A single numpy array with shape [N,], representing N scores\n detected with object class, sorted in descentent order.\n tp_fp_labels: A single boolean numpy array of shape [N,], representing N\n True/False positive label, one label per tuple. 
The labels are sorted\n so that the order of the labels matches the order of the scores.\n result_mapping: A numpy array with shape [N,] with original index of each\n entry.\n \"\"\"\n unique_gt_tuples = np.unique(\n np.concatenate((groundtruth_class_tuples, detected_class_tuples)))\n result_scores = []\n result_tp_fp_labels = []\n result_mapping = []\n\n for unique_tuple in unique_gt_tuples:\n detections_selector = (detected_class_tuples == unique_tuple)\n gt_selector = (groundtruth_class_tuples == unique_tuple)\n\n selector_mapping = np.where(detections_selector)[0]\n\n detection_scores_per_tuple = detected_scores[detections_selector]\n detection_box_per_tuple = detected_box_tuples[detections_selector]\n\n sorted_indices = np.argsort(detection_scores_per_tuple)\n sorted_indices = sorted_indices[::-1]\n\n tp_fp_labels = self._compute_tp_fp_for_single_class(\n detected_box_tuples=detection_box_per_tuple[sorted_indices],\n groundtruth_box_tuples=groundtruth_box_tuples[gt_selector])\n result_scores.append(detection_scores_per_tuple[sorted_indices])\n result_tp_fp_labels.append(tp_fp_labels)\n result_mapping.append(selector_mapping[sorted_indices])\n\n if result_scores:\n result_scores = np.concatenate(result_scores)\n result_tp_fp_labels = np.concatenate(result_tp_fp_labels)\n result_mapping = np.concatenate(result_mapping)\n else:\n result_scores = np.array([], dtype=float)\n result_tp_fp_labels = np.array([], dtype=bool)\n result_mapping = np.array([], dtype=int)\n\n sorted_indices = np.argsort(result_scores)\n sorted_indices = sorted_indices[::-1]\n\n return result_scores[sorted_indices], result_tp_fp_labels[\n sorted_indices], result_mapping[sorted_indices]\n\n def _get_overlaps_and_scores_relation_tuples(self, detected_box_tuples,\n groundtruth_box_tuples):\n \"\"\"Computes overlaps and scores between detected and groundtruth tuples.\n\n Both detections and groundtruth boxes have the same class tuples.\n\n Args:\n detected_box_tuples: A numpy array of structures with shape [N,],\n representing N tuples, each tuple containing the same number of named\n bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n groundtruth_box_tuples: A float numpy array of structures with the shape\n [M,], representing M tuples, each tuple containing the same number\n of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n\n Returns:\n result_iou: A float numpy array of size\n [num_detected_tuples, num_gt_box_tuples].\n \"\"\"\n\n result_iou = np.ones(\n (detected_box_tuples.shape[0], groundtruth_box_tuples.shape[0]),\n dtype=float)\n for field in detected_box_tuples.dtype.fields:\n detected_boxlist_field = np_box_list.BoxList(detected_box_tuples[field])\n gt_boxlist_field = np_box_list.BoxList(groundtruth_box_tuples[field])\n iou_field = np_box_list_ops.iou(detected_boxlist_field, gt_boxlist_field)\n result_iou = np.minimum(iou_field, result_iou)\n return result_iou\n\n def _compute_tp_fp_for_single_class(self, detected_box_tuples,\n groundtruth_box_tuples):\n \"\"\"Labels boxes detected with the same class from the same image as tp/fp.\n\n Detection boxes are expected to be already sorted by score.\n Args:\n detected_box_tuples: A numpy array of structures with shape [N,],\n representing N tuples, each tuple containing the same number of named\n bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n groundtruth_box_tuples: A float numpy array of structures with the shape\n [M,], representing M tuples, each tuple containing the same 
number\n of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max]\n\n Returns:\n tp_fp_labels: a boolean numpy array indicating whether a detection is a\n true positive.\n \"\"\"\n if detected_box_tuples.size == 0:\n return np.array([], dtype=bool)\n\n min_iou = self._get_overlaps_and_scores_relation_tuples(\n detected_box_tuples, groundtruth_box_tuples)\n\n num_detected_tuples = detected_box_tuples.shape[0]\n tp_fp_labels = np.zeros(num_detected_tuples, dtype=bool)\n\n if min_iou.shape[1] > 0:\n max_overlap_gt_ids = np.argmax(min_iou, axis=1)\n is_gt_tuple_detected = np.zeros(min_iou.shape[1], dtype=bool)\n for i in range(num_detected_tuples):\n gt_id = max_overlap_gt_ids[i]\n if min_iou[i, gt_id] >= self.matching_iou_threshold:\n if not is_gt_tuple_detected[gt_id]:\n tp_fp_labels[i] = True\n is_gt_tuple_detected[gt_id] = True\n\n return tp_fp_labels\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.track_utils.variables_helper.\"\"\"\nimport os\n\nimport tensorflow as tf\n\nfrom object_detection.tensorflow_detect.utils import variables_helper\n\n\nclass FilterVariablesTest(tf.test.TestCase):\n\n def _create_variables(self):\n return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'),\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'),\n tf.Variable(1.0, name='StackProposalGenerator/weights'),\n tf.Variable(1.0, name='StackProposalGenerator/biases')]\n\n def test_return_all_variables_when_empty_regex(self):\n variables = self._create_variables()\n out_variables = variables_helper.filter_variables(variables, [''])\n self.assertItemsEqual(out_variables, variables)\n\n def test_return_variables_which_do_not_match_single_regex(self):\n variables = self._create_variables()\n out_variables = variables_helper.filter_variables(variables,\n ['FeatureExtractor/.*'])\n self.assertItemsEqual(out_variables, variables[2:])\n\n def test_return_variables_which_do_not_match_any_regex_in_list(self):\n variables = self._create_variables()\n out_variables = variables_helper.filter_variables(variables, [\n 'FeatureExtractor.*biases', 'StackProposalGenerator.*biases'\n ])\n self.assertItemsEqual(out_variables, [variables[0], variables[2]])\n\n def test_return_variables_matching_empty_regex_list(self):\n variables = self._create_variables()\n out_variables = variables_helper.filter_variables(\n variables, [''], invert=True)\n self.assertItemsEqual(out_variables, [])\n\n def test_return_variables_matching_some_regex_in_list(self):\n variables = self._create_variables()\n out_variables = variables_helper.filter_variables(\n variables,\n ['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'],\n invert=True)\n self.assertItemsEqual(out_variables, [variables[1], variables[3]])\n\n\nclass MultiplyGradientsMatchingRegexTest(tf.test.TestCase):\n\n def _create_grads_and_vars(self):\n 
return [(tf.constant(1.0),\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),\n (tf.constant(2.0),\n tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),\n (tf.constant(3.0),\n tf.Variable(3.0, name='StackProposalGenerator/weights')),\n (tf.constant(4.0),\n tf.Variable(4.0, name='StackProposalGenerator/biases'))]\n\n def test_multiply_all_feature_extractor_variables(self):\n grads_and_vars = self._create_grads_and_vars()\n regex_list = ['FeatureExtractor/.*']\n multiplier = 0.0\n grads_and_vars = variables_helper.multiply_gradients_matching_regex(\n grads_and_vars, regex_list, multiplier)\n exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)]\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n output = sess.run(grads_and_vars)\n self.assertItemsEqual(output, exp_output)\n\n def test_multiply_all_bias_variables(self):\n grads_and_vars = self._create_grads_and_vars()\n regex_list = ['.*/biases']\n multiplier = 0.0\n grads_and_vars = variables_helper.multiply_gradients_matching_regex(\n grads_and_vars, regex_list, multiplier)\n exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)]\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n output = sess.run(grads_and_vars)\n self.assertItemsEqual(output, exp_output)\n\n\nclass FreezeGradientsMatchingRegexTest(tf.test.TestCase):\n\n def _create_grads_and_vars(self):\n return [(tf.constant(1.0),\n tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')),\n (tf.constant(2.0),\n tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')),\n (tf.constant(3.0),\n tf.Variable(3.0, name='StackProposalGenerator/weights')),\n (tf.constant(4.0),\n tf.Variable(4.0, name='StackProposalGenerator/biases'))]\n\n def test_freeze_all_feature_extractor_variables(self):\n grads_and_vars = self._create_grads_and_vars()\n regex_list = ['FeatureExtractor/.*']\n grads_and_vars = variables_helper.freeze_gradients_matching_regex(\n grads_and_vars, regex_list)\n exp_output = [(3.0, 3.0), (4.0, 4.0)]\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n output = sess.run(grads_and_vars)\n self.assertItemsEqual(output, exp_output)\n\n\nclass GetVariablesAvailableInCheckpointTest(tf.test.TestCase):\n\n def test_return_all_variables_from_checkpoint(self):\n with tf.Graph().as_default():\n variables = [\n tf.Variable(1.0, name='weights'),\n tf.Variable(1.0, name='biases')\n ]\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver(variables)\n with self.test_session() as sess:\n sess.run(init_op)\n saver.save(sess, checkpoint_path)\n out_variables = variables_helper.get_variables_available_in_checkpoint(\n variables, checkpoint_path)\n self.assertItemsEqual(out_variables, variables)\n\n def test_return_variables_available_in_checkpoint(self):\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\n with tf.Graph().as_default():\n weight_variable = tf.Variable(1.0, name='weights')\n global_step = tf.train.get_or_create_global_step()\n graph1_variables = [\n weight_variable,\n global_step\n ]\n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver(graph1_variables)\n with self.test_session() as sess:\n sess.run(init_op)\n saver.save(sess, checkpoint_path)\n\n with tf.Graph().as_default():\n graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')]\n 
out_variables = variables_helper.get_variables_available_in_checkpoint(\n graph2_variables, checkpoint_path, include_global_step=False)\n self.assertItemsEqual(out_variables, [weight_variable])\n\n def test_return_variables_available_an_checkpoint_with_dict_inputs(self):\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\n with tf.Graph().as_default():\n graph1_variables = [\n tf.Variable(1.0, name='ckpt_weights'),\n ]\n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver(graph1_variables)\n with self.test_session() as sess:\n sess.run(init_op)\n saver.save(sess, checkpoint_path)\n\n with tf.Graph().as_default():\n graph2_variables_dict = {\n 'ckpt_weights': tf.Variable(1.0, name='weights'),\n 'ckpt_biases': tf.Variable(1.0, name='biases')\n }\n out_variables = variables_helper.get_variables_available_in_checkpoint(\n graph2_variables_dict, checkpoint_path)\n\n self.assertTrue(isinstance(out_variables, dict))\n self.assertItemsEqual(out_variables.keys(), ['ckpt_weights'])\n self.assertTrue(out_variables['ckpt_weights'].op.name == 'weights')\n\n def test_return_variables_with_correct_sizes(self):\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\n with tf.Graph().as_default():\n bias_variable = tf.Variable(3.0, name='biases')\n global_step = tf.train.get_or_create_global_step()\n graph1_variables = [\n tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'),\n bias_variable,\n global_step\n ]\n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver(graph1_variables)\n with self.test_session() as sess:\n sess.run(init_op)\n saver.save(sess, checkpoint_path)\n\n with tf.Graph().as_default():\n graph2_variables = [\n tf.Variable([1.0, 2.0], name='weights'), # New variable shape.\n bias_variable,\n global_step\n ]\n\n out_variables = variables_helper.get_variables_available_in_checkpoint(\n graph2_variables, checkpoint_path, include_global_step=True)\n self.assertItemsEqual(out_variables, [bias_variable, global_step])\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ], [ "tensorflow.zeros_like", "tensorflow.expand_dims", "tensorflow.shape", "tensorflow.name_scope" ], [ "numpy.minimum", "numpy.ones", "numpy.concatenate", "numpy.argmax", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ], [ "tensorflow.Graph", "tensorflow.constant", "tensorflow.Variable", "tensorflow.test.main", "tensorflow.train.get_or_create_global_step", "tensorflow.global_variables_initializer", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
KorotkiyEugene/dsp_sdr_basic
[ "78a77736d0c33951d782f6889884633e4a42c5bd" ]
[ "4/arduino_serial/read_serial_data.py" ]
[ "import serial, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft, ifft, fftshift\n\nFs = 6250 # Hz\nTs = 1/Fs\nNFFT = 4*1024\nOFFSET = int(0.1*NFFT)\n\n# Time vector\nt = np.linspace(0, NFFT-1, NFFT)*Ts\n# Frequency vector\nf = np.linspace(0, int(NFFT/2)-1, int(NFFT/2))*Fs/NFFT\n\n#ser=serial.Serial(\"/dev/ttyACM1\",115200,timeout=1)\nser=serial.Serial(port=\"COM3\", baudrate=115200, bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)\n\n# Wait for serial to be ready\ntime.sleep(1)\n# Flush buffers\nser.flushInput()\n\nx = np.zeros(NFFT+OFFSET)\n \nfor i in range(NFFT+OFFSET):\n x[i] = 5*float(ser.readline().strip().decode())/1024;\n #print(x[i])\n\nser.close()\n\n# Get Spectrum\n\nx = x[OFFSET-1:-1]\n\nxFFT1 = fft(x, NFFT)/NFFT\nxFFT2 = xFFT1[0:int(NFFT/2)]\n\nspectrum = 20*np.log10(np.abs(xFFT2))\n\n# Plot results\n\nplt.figure(figsize=(14, 6))\n\nplt.subplot(1, 2, 1)\nplt.ylabel('Value (volts)')\nplt.xlabel('Time (seconds)')\nplt.title('Signal')\nplt.plot(t, x)\nplt.grid()\n\nplt.subplot(1, 2, 2)\nplt.ylabel('Power (dBm)')\nplt.xlabel('Frequency (Hz)')\nplt.title('Spectrum')\nplt.plot(f, spectrum)\nplt.grid()\n\nplt.tight_layout()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.abs", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "scipy.fftpack.fft", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
robert-giaquinto/gradient-boosted-normalizing-flows
[ "eca3726774f4498f1583bb79d4a9b955b4f51412" ]
[ "models/generative_flow.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport random\nimport torch.distributions as D\n\n\nclass GenerativeFlow(nn.Module):\n \"\"\"\n Generative flow base class\n For models performing density estimation and matching\n \"\"\"\n\n def __init__(self, args):\n super(GenerativeFlow, self).__init__()\n self.num_flows = args.num_flows\n self.z_size = args.z_size\n self.density_evaluation = args.density_evaluation\n self.args = args\n\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.randn(self.z_size, device=args.device).normal_(0, 0.1))\n self.register_buffer('base_dist_var', 3.0 * torch.ones(self.z_size, device=args.device))\n\n # Normalizing flow layers\n self.flow_transformation = None\n\n # auxiliary\n if args.cuda:\n self.FloatTensor = torch.cuda.FloatTensor\n else:\n self.FloatTensor = torch.FloatTensor\n\n # log-det-jacobian = 0 without flows\n self.log_det_j = self.FloatTensor(1).zero_()\n\n\n @property\n def base_dist(self):\n #rval = D.MultivariateNormal(self.base_dist_mean, self.base_dist_var)\n rval = D.Normal(self.base_dist_mean, self.base_dist_var)\n return rval\n\n def forward(self):\n raise NotImplementedError\n" ]
[ [ "torch.randn", "torch.ones", "torch.distributions.Normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
talsperre/rl-algo-implementations
[ "3d9daf25317bc3d1d9776546df47ff4d70d25084" ]
[ "test/test_dqn_agent.py" ]
[ "import sys\nsys.path.append(\"../\")\nimport gym\nimport time\nimport torch\nimport random\nimport unittest\nimport rl_models\nimport numpy as np\n\nfrom rl_models.models.AtariCNN import AtariCNN\nfrom rl_models.agents.DQNAgent import DQNAgent\nfrom rl_models.common.utils.RingBuffer import RingBuffer\nfrom rl_models.common.wrappers.AtariWrappers import wrap_deepmind, wrap_pytorch\nrandom.seed(42)\nnp.random.seed(42)\ntorch.manual_seed(42)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef make_env(env_id):\n env = gym.make(env_id)\n env = wrap_deepmind(env, frame_stack=True)\n env = wrap_pytorch(env)\n env.seed(42)\n return env\n\ndef make_net(inp_shape, num_actions):\n PolicyNet = AtariCNN(inp_shape, num_actions)\n TargetNet = AtariCNN(inp_shape, num_actions)\n return PolicyNet, TargetNet\n\n\nclass TestDQNAgent(unittest.TestCase):\n def test_random_action_type(self):\n env = make_env(\"BreakoutDeterministic-v4\")\n PolicyNet, TargetNet = make_net([4, 84, 84], env.action_space.n)\n replay_memory = RingBuffer(256)\n agent = DQNAgent(env, replay_memory, device)\n action = agent.select_action(PolicyNet, epsilon=1)\n self.assertIsInstance(action, int)\n\n def test_greedy_action_type(self):\n env = make_env(\"BreakoutDeterministic-v4\")\n PolicyNet, TargetNet = make_net([4, 84, 84], env.action_space.n)\n replay_memory = RingBuffer(256)\n agent = DQNAgent(env, replay_memory, device)\n action = agent.select_action(PolicyNet, epsilon=0.0)\n self.assertIsInstance(action, int)\n \n def test_play_single_step(self):\n env = make_env(\"BreakoutDeterministic-v4\")\n PolicyNet, TargetNet = make_net([4, 84, 84], env.action_space.n)\n replay_memory = RingBuffer(256)\n agent = DQNAgent(env, replay_memory, device)\n reward, is_done = agent.play_step(PolicyNet, epsilon=0.0)\n self.assertIsInstance(reward, float)\n self.assertIsInstance(is_done, bool)\n self.assertEqual(len(agent.replay_memory), 1)\n \n def test_play_episode(self):\n env = gym.make('PongDeterministic-v4')\n PolicyNet, TargetNet = make_net([4, 84, 84], env.action_space.n)\n replay_memory = RingBuffer(256)\n agent = DQNAgent(env, replay_memory, device)\n is_done = False\n while not is_done:\n reward, is_done = agent.play_step(PolicyNet, epsilon=0.0)\n\n\nif __name__==\"__main__\":\n unittest.main()" ]
[ [ "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rebeccabernie/CurrencyAnalyser
[ "1f57e5b5fee854912c205cb98f57c980027f0a03" ]
[ "Applied Project/BitcoinPrediction/RoughWork/mediumpost/CNN.py" ]
[ "\n\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras import applications\nfrom keras.models import Sequential\nfrom keras.models import Model\nfrom keras.layers import Dropout, Flatten, Dense, Activation\nfrom keras.callbacks import CSVLogger\nimport tensorflow as tf\nfrom scipy.ndimage import imread\nimport numpy as np\nimport random\nfrom keras.layers import LSTM\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras import backend as K\nimport keras\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras import optimizers\nimport h5py\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\nimport pandas as pd\n# import matplotlib\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\n# In[2]:\n\n\nwith h5py.File(''.join(['bitcoin2012_2017_256_16.h5']), 'r') as hf:\n datas = hf['inputs'].value\n labels = hf['outputs'].value\n input_times = hf['input_times'].value\n output_times = hf['output_times'].value\n original_datas = hf['original_datas'].value\n original_outputs = hf['original_outputs'].value\n\n\n\n\n# In[3]:\n\n\n\n\n# In[4]:\n\n# For CNN\nscaler=MinMaxScaler()\n#split training validation\ntraining_size = int(0.8* datas.shape[0])\ntraining_datas = datas[:training_size,:]\ntraining_labels = labels[:training_size,:]\nvalidation_datas = datas[training_size:,:]\nvalidation_labels = labels[training_size:,:]\nground_true = original_outputs[training_size:,:]\n# For LSTM\n\n# nb_samples = datas.shape[0]\n# nb_samples\n# datetimes = np.load('datetime.npy')\n# epochs = 50\n# batch_size = 15\n# step_size = 5\n# nb_validation_samples = int(0.3*nb_samples)\n# nb_training_samples = nb_samples - nb_validation_samples\n\n# input_step_size = 50\n# output_size = 30\n# scaler = MinMaxScaler(feature_range=(0, 1))\n\n# training_datas = scaler.fit_transform(datas[:nb_training_samples])\n# validation_datas = scaler.fit_transform(datas[-nb_validation_samples:])\n\n# training_labels = labels[:nb_training_samples]\n# validation_labels = labels[-nb_validation_samples:]\n\n# training_next_price = training_datas[:,-1]\n# validation_next_price = validation_datas[:,-1]\n\n# training_datas = training_datas[:,:-1]\n# validation_datas = validation_datas[:,:-1]\n\n# training_datas = training_datas.reshape(nb_training_samples, step_size,1)\n# validation_datas = validation_datas.reshape(nb_validation_samples, step_size,1)\n\n\n\nstep_size = datas.shape[1]\nbatch_size= 8\nnb_features = datas.shape[2]\nepochs = 1\n\n#build model\nmodel = Sequential()\nmodel.add(Conv1D(activation=\"relu\", input_shape=(step_size, nb_features), strides=3\t, filters=8, kernel_size=8))\nmodel.add(Dropout(0.25))\nmodel.add(Conv1D(activation=\"relu\", strides=2, filters=8, kernel_size=8))\nmodel.add(Dropout(0.25))\nmodel.add(Conv1D( strides=2, filters=4, kernel_size=8))\nmodel.load_weights('weights/bitcoin2012_2017_256_16_CNNweights-improvement-02-0.00011.hdf5')\nmodel.compile(loss='mse', optimizer='adam')\n\n\n# In[5]:\n\n\n# model = Sequential()\n# model.add(LSTM(10\n# , input_shape=(input_step_size,1),\n \n# return_sequences=False))\n# model.add(Dropout(0.2))\n\n\n# model.add(Dense(output_size))\n# model.add(Activation('sigmoid'))\n\n# model.load_weights('weights/bitcoin2012_2017_50_30_weights.hdf5')\n# model.compile(loss='mse', optimizer='adam')\n# scaler = MinMaxScaler(feature_range=(0, 1))\n# 
scaler.fit(datas.reshape(-1))\n# predicted_inverted = scaler.inverse_transform(predicted)\n# ground_true = scaler.inverse_transform(validation_next_price)\n# In[6]:\n\n\npredicted = model.predict(validation_datas)\npredicted_inverted = []\n\n# In[7]:\nfor i in range(original_datas.shape[1]):\n\tscaler.fit(original_datas[:,i].reshape(-1,1))\n\tpredicted_inverted.append(scaler.inverse_transform(predicted[:,:,i]))\n\n#get only the close data\nground_true = ground_true[:,:,0].reshape(-1)\noutput_times = output_times.reshape(-1)\n\npredicted_inverted = np.array(predicted_inverted)[:,:,0].reshape(-1)\n\n\n\n# In[8]:\nprint(output_times.shape, ground_true.shape)\n\nplt.plot(output_times[-1000:],ground_true[-1000:])\nplt.plot(output_times[-1000:],predicted_inverted[-1000:])\n\n# In[ ]:\n\n\nplt.show()\n\n\n# In[ ]:\n\n" ]
[ [ "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.plot", "sklearn.preprocessing.MinMaxScaler", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joelouismarino/generalized_filtering
[ "f1032d62761c498e5206c0a2b07a012c8223b3de" ]
[ "evaluate.py" ]
[ "import pickle\nimport sys, os\nimport torch\nfrom config import run_config, train_config, data_config, model_config\nfrom util.logging import Logger\nfrom util.data.load_data import load_data\nfrom lib.models import load_model\nfrom util.eval import eval_model\n\ndef start_evaluating(run_config, train_config, data_config, model_config):\n # hack to prevent the data loader from going on GPU 0\n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(run_config['cuda_device'])\n # torch.cuda.set_device(run_config['cuda_device'])\n torch.cuda.set_device(0)\n\n logger = Logger(run_config)\n\n # load the data\n train_data, val_data, test_data = load_data(data_config, train_config['batch_size'])\n if test_data is None:\n test_data = val_data\n\n # load the model\n print('Loading model...')\n model = load_model(model_config)\n\n assert run_config['resume_path'] is not None, 'Resume path must be set for evaluation.'\n print('Loading checkpoint ' + run_config['resume_path'])\n model = logger.load_best(model)\n # model = logger.load_epoch(model, 500)\n\n # load the training batch size (needed to evaluate AVF)\n sys.path.insert(0, os.path.join(run_config['log_root_path'], run_config['resume_path'], 'source', 'config'))\n import train_config as tc\n reload(tc)\n batch_size = tc.train_config['batch_size']\n\n print('Putting the model on the GPU...')\n model.cuda()\n\n model.eval()\n\n output = eval_model(test_data, model, train_config, training_batch_size=batch_size)\n path = os.path.join(run_config['log_root_path'], run_config['log_dir'])\n with open(path, 'wb') as f:\n pickle.dump(output, f)\n\n\nif __name__=='__main__':\n start_evaluating(run_config, train_config, data_config, model_config)\n" ]
[ [ "torch.cuda.set_device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ferorga/CarND-Capstone
[ "b91ce151c556d507d0abfcfd54f620d7b10b58b0" ]
[ "ros/src/waypoint_updater/waypoint_updater.py" ]
[ "#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom std_msgs.msg import Int32\nfrom styx_msgs.msg import Lane, Waypoint\nfrom scipy.spatial import KDTree\nimport numpy as np\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n'''\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 1.0\nMPS2MPH = 2.236936\nSAFETY_FACTOR = 0.90\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # Attribute initialization\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n self.speed_limit = rospy.get_param('/waypoint_loader/velocity') / 3.6\n rospy.loginfo(\"Speed limit set to %.2f MPH\", self.speed_limit*MPS2MPH)\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.loop()\n\n def loop(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints and self.waypoint_tree:\n self.publish_waypoints()\n rate.sleep()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n \"\"\"\n Callback for /base_waypoints message. \n Updates the list of points and the KDTree\n \"\"\"\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. 
We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n def get_closest_waypoint_id(self):\n \"\"\"\n Returns the index of the closest waypoint ahead of the vehicle\n \"\"\"\n if self.waypoint_tree:\n pt = [self.pose.pose.position.x, self.pose.pose.position.y]\n closest_id = self.waypoint_tree.query(pt, 1)[1]\n\n closest_pt = np.array(self.waypoints_2d[closest_id])\n prev_pt = np.array(self.waypoints_2d[closest_id - 1])\n pt = np.array(pt)\n value = np.dot(closest_pt - prev_pt, pt - closest_pt)\n if value > 0:\n closest_id = (closest_id + 1) % len(self.waypoints_2d)\n\n return closest_id\n return 0\n\n def publish_waypoints(self):\n if not self.base_waypoints:\n return\n lane = self.generate_lane()\n self.final_waypoints_pub.publish(lane)\n\n def generate_lane(self):\n lane = Lane()\n\n closest_idx = self.get_closest_waypoint_id()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]\n\n if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = waypoints\n else:\n lane.waypoints = self.decelerate_waypoints(waypoints, closest_idx)\n\n return lane\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n result = []\n for i, wp in enumerate(waypoints):\n new_point = Waypoint()\n new_point.pose = wp.pose\n\n stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0) # Two waypints back from line so the front of\n # the car stops at the line\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * SAFETY_FACTOR * dist)\n if vel < 1.0:\n vel = 0.0\n\n new_point.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n result.append(new_point)\n\n return result\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')" ]
[ [ "numpy.dot", "numpy.array", "scipy.spatial.KDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
EiffL/SSELFI
[ "83420e68e4efa867d4a86dc27d09411dd93fb5b7" ]
[ "conditional_masked_autoregressive.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport six\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import affine_scalar\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.math.numeric import clip_by_value_preserve_gradient\nfrom tensorflow_probability.python.bijectors import masked_dense\n\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\n__all__ = [\n 'ConditionalMaskedAutoregressiveFlow',\n 'masked_autoregressive_conditional_template'\n]\n\nclass ConditionalMaskedAutoregressiveFlow(bijector_lib.Bijector):\n \"\"\" Conditional Affine MaskedAutoregressiveFlow bijector.\n \"\"\"\n def __init__(self,\n shift_and_log_scale_fn=None,\n conditioning=None,\n bijector_fn=None,\n is_constant_jacobian=False,\n validate_args=False,\n unroll_loop=False,\n event_ndims=1,\n name=None):\n \"\"\"Creates the MaskedAutoregressiveFlow bijector.\n Args:\n shift_and_log_scale_fn: Python `callable` which computes `shift` and\n `log_scale` from the inverse domain (`y`). Calculation must respect the\n 'autoregressive property' (see class docstring). Suggested default\n `tfb.AutoregressiveNetwork(params=2, hidden_layers=...)`.\n Typically the function contains `tf.Variables`. Returning `None` for\n either (both) `shift`, `log_scale` is equivalent to (but more efficient\n than) returning zero. If `shift_and_log_scale_fn` returns a single\n `Tensor`, the returned value will be unstacked to get the `shift` and\n `log_scale`: `tf.unstack(shift_and_log_scale_fn(y), num=2, axis=-1)`.\n bijector_fn: Python `callable` which returns a `tfb.Bijector` which\n transforms event tensor with the signature\n `(input, **condition_kwargs) -> bijector`. The bijector must operate on\n scalar events and must not alter the rank of its input. The\n `bijector_fn` will be called with `Tensors` from the inverse domain\n (`y`). Calculation must respect the 'autoregressive property' (see\n class docstring).\n is_constant_jacobian: Python `bool`. Default: `False`. When `True` the\n implementation assumes `log_scale` does not depend on the forward domain\n (`x`) or inverse domain (`y`) values. (No validation is made;\n `is_constant_jacobian=False` is always safe but possibly computationally\n inefficient.)\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n unroll_loop: Python `bool` indicating whether the `tf.while_loop` in\n `_forward` should be replaced with a static for loop. Requires that\n the final dimension of `x` be known at graph construction time. Defaults\n to `False`.\n event_ndims: Python `integer`, the intrinsic dimensionality of this\n bijector. 
1 corresponds to a simple vector autoregressive bijector as\n implemented by the `tfp.bijectors.AutoregressiveNetwork`, 2 might be\n useful for a 2D convolutional `shift_and_log_scale_fn` and so on.\n name: Python `str`, name given to ops managed by this object.\n Raises:\n ValueError: If both or none of `shift_and_log_scale_fn` and `bijector_fn`\n are specified.\n \"\"\"\n name = name or 'conditional_masked_autoregressive_flow'\n self._unroll_loop = unroll_loop\n self._event_ndims = event_ndims\n if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n raise ValueError('Exactly one of `shift_and_log_scale_fn` and '\n '`bijector_fn` should be specified.')\n if shift_and_log_scale_fn:\n def _bijector_fn(x, **condition_kwargs):\n if conditioning is not None:\n print(x, conditioning)\n x = tf.concat([conditioning, x], axis=-1)\n cond_depth = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(conditioning.shape, 1)[-1])\n else:\n cond_depth = 0\n params = shift_and_log_scale_fn(x, **condition_kwargs)\n if tf.is_tensor(params):\n shift, log_scale = tf.unstack(params, num=2, axis=-1)\n else:\n shift, log_scale = params\n shift = shift[..., cond_depth:]\n log_scale = log_scale[..., cond_depth:]\n return affine_scalar.AffineScalar(shift=shift, log_scale=log_scale)\n\n bijector_fn = _bijector_fn\n\n if validate_args:\n bijector_fn = _validate_bijector_fn(bijector_fn)\n # Still do this assignment for variable tracking.\n self._shift_and_log_scale_fn = shift_and_log_scale_fn\n self._bijector_fn = bijector_fn\n super(ConditionalMaskedAutoregressiveFlow, self).__init__(\n forward_min_event_ndims=self._event_ndims,\n is_constant_jacobian=is_constant_jacobian,\n validate_args=validate_args,\n name=name)\n\n def _forward(self, x, **kwargs):\n static_event_size = tensorshape_util.num_elements(\n tensorshape_util.with_rank_at_least(\n x.shape, self._event_ndims)[-self._event_ndims:])\n\n if self._unroll_loop:\n if not static_event_size:\n raise ValueError(\n 'The final {} dimensions of `x` must be known at graph '\n 'construction time if `unroll_loop=True`. `x.shape: {!r}`'.format(\n self._event_ndims, x.shape))\n y = tf.zeros_like(x, name='y0')\n\n for _ in range(static_event_size):\n y = self._bijector_fn(y, **kwargs).forward(x)\n return y\n\n event_size = tf.reduce_prod(tf.shape(x)[-self._event_ndims:])\n y0 = tf.zeros_like(x, name='y0')\n # call the template once to ensure creation\n if not tf.executing_eagerly():\n _ = self._bijector_fn(y0, **kwargs).forward(y0)\n def _loop_body(index, y0):\n \"\"\"While-loop body for autoregression calculation.\"\"\"\n # Set caching device to avoid re-getting the tf.Variable for every while\n # loop iteration.\n with tf1.variable_scope(tf1.get_variable_scope()) as vs:\n if vs.caching_device is None and not tf.executing_eagerly():\n vs.set_caching_device(lambda op: op.device)\n bijector = self._bijector_fn(y0, **kwargs)\n y = bijector.forward(x)\n return index + 1, y\n # If the event size is available at graph construction time, we can inform\n # the graph compiler of the maximum number of steps. 
If not,\n # static_event_size will be None, and the maximum_iterations argument will\n # have no effect.\n _, y = tf.while_loop(\n cond=lambda index, _: index < event_size,\n body=_loop_body,\n loop_vars=(0, y0),\n maximum_iterations=static_event_size)\n return y\n\n def _inverse(self, y, **kwargs):\n bijector = self._bijector_fn(y, **kwargs)\n return bijector.inverse(y)\n\n def _inverse_log_det_jacobian(self, y, **kwargs):\n return self._bijector_fn(y, **kwargs).inverse_log_det_jacobian(\n y, event_ndims=self._event_ndims)\n\ndef masked_autoregressive_conditional_template(hidden_layers,\n conditional_tensor,\n shift_only=False,\n activation=tf.nn.relu,\n log_scale_min_clip=-3.,\n log_scale_max_clip=3.,\n log_scale_clip_gradient=True,\n name=None,\n *args, # pylint: disable=keyword-arg-before-vararg\n **kwargs):\n \"\"\"Build the Masked Autoregressive Density Estimator (Germain et al., 2015).\n This will be wrapped in a make_template to ensure the variables are only\n created once. It takes the input and returns the `loc` ('mu' in [Germain et\n al. (2015)][1]) and `log_scale` ('alpha' in [Germain et al. (2015)][1]) from\n the MADE network.\n Warning: This function uses `masked_dense` to create randomly initialized\n `tf.Variables`. It is presumed that these will be fit, just as you would any\n other neural architecture which uses `tf.layers.dense`.\n #### About Hidden Layers\n Each element of `hidden_layers` should be greater than the `input_depth`\n (i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the\n neural network). This is necessary to ensure the autoregressivity property.\n #### About Clipping\n This function also optionally clips the `log_scale` (but possibly not its\n gradient). This is useful because if `log_scale` is too small/large it might\n underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`\n bijector to implement a bijection. Additionally, the `log_scale_clip_gradient`\n `bool` indicates whether the gradient should also be clipped. The default does\n not clip the gradient; this is useful because it still provides gradient\n information (for fitting) yet solves the numerical stability problem. I.e.,\n `log_scale_clip_gradient = False` means\n `grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual\n `grad[clip(x)] exp(clip(x))`.\n Args:\n hidden_layers: Python `list`-like of non-negative integer, scalars\n indicating the number of units in each hidden layer. Default: `[512, 512].\n shift_only: Python `bool` indicating if only the `shift` term shall be\n computed. Default: `False`.\n activation: Activation function (callable). Explicitly setting to `None`\n implies a linear activation.\n log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the\n same shape as `log_scale`. The minimum value to clip by. Default: -5.\n log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the\n same shape as `log_scale`. The maximum value to clip by. Default: 3.\n log_scale_clip_gradient: Python `bool` indicating that the gradient of\n `tf.clip_by_value` should be preserved. Default: `False`.\n name: A name for ops managed by this function. Default:\n 'masked_autoregressive_default_template'.\n *args: `tf.layers.dense` arguments.\n **kwargs: `tf.layers.dense` keyword arguments.\n Returns:\n shift: `Float`-like `Tensor` of shift terms (the 'mu' in\n [Germain et al. (2015)][1]).\n log_scale: `Float`-like `Tensor` of log(scale) terms (the 'alpha' in\n [Germain et al. 
(2015)][1]).\n Raises:\n NotImplementedError: if rightmost dimension of `inputs` is unknown prior to\n graph execution.\n #### References\n [1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:\n Masked Autoencoder for Distribution Estimation. In _International\n Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509\n \"\"\"\n name = name or 'masked_autoregressive_conditional_template'\n with tf.name_scope(name):\n\n def _fn(x):\n \"\"\"MADE parameterized via `masked_autoregressive_default_template`.\"\"\"\n # TODO(b/67594795): Better support of dynamic shape.\n cond_depth = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(conditional_tensor.shape, 1)[-1])\n\n input_shape = (\n np.int32(tensorshape_util.as_list(x.shape))\n if tensorshape_util.is_fully_defined(x.shape) else tf.shape(x))\n if tensorshape_util.rank(x.shape) == 1:\n x = x[tf.newaxis, ...]\n x = tf.concat([conditional_tensor, x], axis=-1)\n input_depth = tf.compat.dimension_value(\n tensorshape_util.with_rank_at_least(x.shape, 1)[-1])\n if input_depth is None:\n raise NotImplementedError(\n 'Rightmost dimension must be known prior to graph execution.')\n for i, units in enumerate(hidden_layers):\n x = masked_dense(\n inputs=x,\n units=units,\n num_blocks=input_depth,\n exclusive=True if i == 0 else False,\n activation=activation,\n *args, # pylint: disable=keyword-arg-before-vararg\n **kwargs)\n x = masked_dense(\n inputs=x,\n units=(1 if shift_only else 2) * input_depth,\n num_blocks=input_depth,\n activation=None,\n *args, # pylint: disable=keyword-arg-before-vararg\n **kwargs)\n if shift_only:\n x = x[..., cond_depth:]\n x = tf.reshape(x, shape=input_shape)\n return x, None\n else:\n x = x[..., 2*cond_depth:]\n x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))\n shift, log_scale = tf.unstack(x, num=2, axis=-1)\n which_clip = (\n tf.clip_by_value\n if log_scale_clip_gradient else clip_by_value_preserve_gradient)\n log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)\n return shift, log_scale\n\n return tf1.make_template(name, _fn)\n" ]
[ [ "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.unstack", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.is_tensor", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.reshape", "tensorflow.compat.v1.make_template", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v2.while_loop" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Suchun-sv/pyHGT
[ "49fb66e04386835d9dc3ba22abba121f8a960469" ]
[ "OAG/pyHGT/conv.py" ]
[ "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv, GATConv\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.nn.inits import glorot\nfrom torch_geometric.utils import softmax\n\n\nclass HGTConv(MessagePassing):\n def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout=0.2, use_norm=True, use_RTE=True,\n **kwargs):\n super(HGTConv, self).__init__(node_dim=0, aggr='add', **kwargs)\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.num_types = num_types\n self.num_relations = num_relations\n self.total_rel = num_types * num_relations * num_types\n self.n_heads = n_heads\n self.d_k = out_dim // n_heads\n self.sqrt_dk = math.sqrt(self.d_k)\n self.use_norm = use_norm\n self.use_RTE = use_RTE\n self.att = None\n\n self.k_linears = nn.ModuleList()\n self.q_linears = nn.ModuleList()\n self.v_linears = nn.ModuleList()\n self.a_linears = nn.ModuleList()\n self.norms = nn.ModuleList()\n\n for t in range(num_types):\n self.k_linears.append(nn.Linear(in_dim, out_dim))\n self.q_linears.append(nn.Linear(in_dim, out_dim))\n self.v_linears.append(nn.Linear(in_dim, out_dim))\n self.a_linears.append(nn.Linear(out_dim, out_dim))\n if use_norm:\n self.norms.append(nn.LayerNorm(out_dim))\n '''\n TODO: make relation_pri smaller, as not all <st, rt, tt> pair exist in meta relation list.\n '''\n self.relation_pri = nn.Parameter(torch.ones(num_relations, self.n_heads))\n self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))\n self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))\n self.skip = nn.Parameter(torch.ones(num_types))\n self.drop = nn.Dropout(dropout)\n\n if self.use_RTE:\n self.emb = RelTemporalEncoding(in_dim)\n\n glorot(self.relation_att)\n glorot(self.relation_msg)\n\n def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):\n return self.propagate(edge_index, node_inp=node_inp, node_type=node_type, \\\n edge_type=edge_type, edge_time=edge_time)\n\n def message(self, edge_index_i, node_inp_i, node_inp_j, node_type_i, node_type_j, edge_type, edge_time):\n '''\n j: source, i: target; <j, i>\n '''\n data_size = edge_index_i.size(0)\n '''\n Create Attention and Message tensor beforehand.\n '''\n res_att = torch.zeros(data_size, self.n_heads).to(node_inp_i.device)\n res_msg = torch.zeros(data_size, self.n_heads, self.d_k).to(node_inp_i.device)\n\n for source_type in range(self.num_types):\n sb = (node_type_j == int(source_type))\n k_linear = self.k_linears[source_type]\n v_linear = self.v_linears[source_type]\n for target_type in range(self.num_types):\n tb = (node_type_i == int(target_type)) & sb\n q_linear = self.q_linears[target_type]\n for relation_type in range(self.num_relations):\n '''\n idx is all the edges with meta relation <source_type, relation_type, target_type>\n '''\n idx = (edge_type == int(relation_type)) & tb\n if idx.sum() == 0:\n continue\n '''\n Get the corresponding input node representations by idx.\n Add tempotal encoding to source representation (j)\n '''\n target_node_vec = node_inp_i[idx]\n source_node_vec = node_inp_j[idx]\n if self.use_RTE:\n source_node_vec = self.emb(source_node_vec, edge_time[idx])\n '''\n Step 1: Heterogeneous Mutual Attention\n '''\n q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)\n k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)\n k_mat = torch.bmm(k_mat.transpose(1, 0), 
self.relation_att[relation_type]).transpose(1, 0)\n res_att[idx] = (q_mat * k_mat).sum(dim=-1) * self.relation_pri[relation_type] / self.sqrt_dk\n '''\n Step 2: Heterogeneous Message Passing\n '''\n v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)\n res_msg[idx] = torch.bmm(v_mat.transpose(1, 0), self.relation_msg[relation_type]).transpose(1, 0)\n '''\n Softmax based on target node's id (edge_index_i). Store attention value in self.att for later visualization.\n '''\n self.att = softmax(res_att, edge_index_i)\n res = res_msg * self.att.view(-1, self.n_heads, 1)\n del res_att, res_msg\n return res.view(-1, self.out_dim)\n\n def update(self, aggr_out, node_inp, node_type):\n '''\n Step 3: Target-specific Aggregation\n x = W[node_type] * gelu(Agg(x)) + x\n '''\n aggr_out = F.gelu(aggr_out)\n res = torch.zeros(aggr_out.size(0), self.out_dim).to(node_inp.device)\n for target_type in range(self.num_types):\n idx = (node_type == int(target_type))\n if idx.sum() == 0:\n continue\n trans_out = self.drop(self.a_linears[target_type](aggr_out[idx]))\n '''\n Add skip connection with learnable weight self.skip[t_id]\n '''\n alpha = torch.sigmoid(self.skip[target_type])\n if self.use_norm:\n res[idx] = self.norms[target_type](trans_out * alpha + node_inp[idx] * (1 - alpha))\n else:\n res[idx] = trans_out * alpha + node_inp[idx] * (1 - alpha)\n return res\n\n def __repr__(self):\n return '{}(in_dim={}, out_dim={}, num_types={}, num_types={})'.format(\n self.__class__.__name__, self.in_dim, self.out_dim,\n self.num_types, self.num_relations)\n\n\nclass DenseHGTConv(MessagePassing):\n def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout=0.2, use_norm=True, use_RTE=True,\n **kwargs):\n super(DenseHGTConv, self).__init__(node_dim=0, aggr='add', **kwargs)\n\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.num_types = num_types\n self.num_relations = num_relations\n self.total_rel = num_types * num_relations * num_types\n self.n_heads = n_heads\n self.d_k = out_dim // n_heads\n self.sqrt_dk = math.sqrt(self.d_k)\n self.use_norm = use_norm\n self.use_RTE = use_RTE\n self.att = None\n\n self.k_linears = nn.ModuleList()\n self.q_linears = nn.ModuleList()\n self.v_linears = nn.ModuleList()\n self.a_linears = nn.ModuleList()\n self.norms = nn.ModuleList()\n\n for t in range(num_types):\n self.k_linears.append(nn.Linear(in_dim, out_dim))\n self.q_linears.append(nn.Linear(in_dim, out_dim))\n self.v_linears.append(nn.Linear(in_dim, out_dim))\n self.a_linears.append(nn.Linear(out_dim, out_dim))\n if use_norm:\n self.norms.append(nn.LayerNorm(out_dim))\n '''\n TODO: make relation_pri smaller, as not all <st, rt, tt> pair exist in meta relation list.\n '''\n self.relation_pri = nn.Parameter(torch.ones(num_relations, self.n_heads))\n self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))\n self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))\n self.drop = nn.Dropout(dropout)\n\n if self.use_RTE:\n self.emb = RelTemporalEncoding(in_dim)\n\n glorot(self.relation_att)\n glorot(self.relation_msg)\n\n self.mid_linear = nn.Linear(out_dim, out_dim * 2)\n self.out_linear = nn.Linear(out_dim * 2, out_dim)\n self.out_norm = nn.LayerNorm(out_dim)\n\n def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):\n return self.propagate(edge_index, node_inp=node_inp, node_type=node_type, \\\n edge_type=edge_type, edge_time=edge_time)\n\n def message(self, edge_index_i, node_inp_i, node_inp_j, 
node_type_i, node_type_j, edge_type, edge_time):\n '''\n j: source, i: target; <j, i>\n '''\n data_size = edge_index_i.size(0)\n '''\n Create Attention and Message tensor beforehand.\n '''\n res_att = torch.zeros(data_size, self.n_heads).to(node_inp_i.device)\n res_msg = torch.zeros(data_size, self.n_heads, self.d_k).to(node_inp_i.device)\n\n for source_type in range(self.num_types):\n sb = (node_type_j == int(source_type))\n k_linear = self.k_linears[source_type]\n v_linear = self.v_linears[source_type]\n for target_type in range(self.num_types):\n tb = (node_type_i == int(target_type)) & sb\n q_linear = self.q_linears[target_type]\n for relation_type in range(self.num_relations):\n '''\n idx is all the edges with meta relation <source_type, relation_type, target_type>\n '''\n idx = (edge_type == int(relation_type)) & tb\n if idx.sum() == 0:\n continue\n '''\n Get the corresponding input node representations by idx.\n Add tempotal encoding to source representation (j)\n '''\n target_node_vec = node_inp_i[idx]\n source_node_vec = node_inp_j[idx]\n if self.use_RTE:\n source_node_vec = self.emb(source_node_vec, edge_time[idx])\n '''\n Step 1: Heterogeneous Mutual Attention\n '''\n q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)\n k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)\n k_mat = torch.bmm(k_mat.transpose(1, 0), self.relation_att[relation_type]).transpose(1, 0)\n res_att[idx] = (q_mat * k_mat).sum(dim=-1) * self.relation_pri[relation_type] / self.sqrt_dk\n '''\n Step 2: Heterogeneous Message Passing\n '''\n v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)\n res_msg[idx] = torch.bmm(v_mat.transpose(1, 0), self.relation_msg[relation_type]).transpose(1, 0)\n '''\n Softmax based on target node's id (edge_index_i). 
Store attention value in self.att for later visualization.\n '''\n self.att = softmax(res_att, edge_index_i)\n res = res_msg * self.att.view(-1, self.n_heads, 1)\n del res_att, res_msg\n return res.view(-1, self.out_dim)\n\n def update(self, aggr_out, node_inp, node_type):\n '''\n Step 3: Target-specific Aggregation\n x = W[node_type] * Agg(x) + x\n '''\n res = torch.zeros(aggr_out.size(0), self.out_dim).to(node_inp.device)\n for target_type in range(self.num_types):\n idx = (node_type == int(target_type))\n if idx.sum() == 0:\n continue\n trans_out = self.drop(self.a_linears[target_type](aggr_out[idx])) + node_inp[idx]\n '''\n Add skip connection with learnable weight self.skip[t_id]\n '''\n if self.use_norm:\n trans_out = self.norms[target_type](trans_out)\n\n '''\n Step 4: Shared Dense Layer\n x = Out_L(gelu(Mid_L(x))) + x\n '''\n\n trans_out = self.drop(self.out_linear(F.gelu(self.mid_linear(trans_out)))) + trans_out\n res[idx] = self.out_norm(trans_out)\n return res\n\n def __repr__(self):\n return '{}(in_dim={}, out_dim={}, num_types={}, num_types={})'.format(\n self.__class__.__name__, self.in_dim, self.out_dim,\n self.num_types, self.num_relations)\n\n\nclass RelTemporalEncoding(nn.Module):\n '''\n Implement the Temporal Encoding (Sinusoid) function.\n '''\n\n def __init__(self, n_hid, max_len=240, dropout=0.2):\n super(RelTemporalEncoding, self).__init__()\n position = torch.arange(0., max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, n_hid, 2) *\n -(math.log(10000.0) / n_hid))\n emb = nn.Embedding(max_len, n_hid)\n emb.weight.data[:, 0::2] = torch.sin(position * div_term) / math.sqrt(n_hid)\n emb.weight.data[:, 1::2] = torch.cos(position * div_term) / math.sqrt(n_hid)\n emb.requires_grad = False\n self.emb = emb\n self.lin = nn.Linear(n_hid, n_hid)\n\n def forward(self, x, t):\n return x + self.lin(self.emb(t))\n\n\nclass GeneralConv(nn.Module):\n def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm=True,\n use_RTE=True):\n super(GeneralConv, self).__init__()\n self.conv_name = conv_name\n if self.conv_name == 'hgt':\n self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm, use_RTE)\n elif self.conv_name == 'dense_hgt':\n self.base_conv = DenseHGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm,\n use_RTE)\n elif self.conv_name == 'gcn':\n self.base_conv = GCNConv(in_hid, out_hid)\n elif self.conv_name == 'gat':\n self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)\n\n def forward(self, meta_xs, node_type, edge_index, edge_type, edge_time):\n if self.conv_name == 'hgt':\n return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)\n elif self.conv_name == 'gcn':\n return self.base_conv(meta_xs, edge_index)\n elif self.conv_name == 'gat':\n return self.base_conv(meta_xs, edge_index)\n elif self.conv_name == 'dense_hgt':\n return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)\n" ]
[ [ "torch.nn.Dropout", "torch.sigmoid", "torch.ones", "torch.Tensor", "torch.sin", "torch.zeros", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.functional.gelu", "torch.nn.Linear", "torch.arange", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
undeadyequ/PaddleOCR
[ "7e31d064ba3054f87fa27cff84784f706248c61e" ]
[ "tools/extend/util.py" ]
[ "\"\"\"\r\ncreated by: Donghyeon Won\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image\r\n\r\nfrom torch.utils.data import Dataset\r\nimport torch.nn as nn\r\nimport torchvision.transforms as transforms\r\nimport torchvision.models as models\r\nimport torch\r\nimport argparse\r\n\r\n\r\nclass ProtestDataset(Dataset):\r\n \"\"\"\r\n dataset for training and evaluation\r\n \"\"\"\r\n def __init__(self, txt_file, img_dir, transform = None):\r\n \"\"\"\r\n Args:\r\n txt_file: Path to txt file with annotation\r\n img_dir: Directory with images\r\n transform: Optional transform to be applied on a sample.\r\n \"\"\"\r\n self.label_frame = pd.read_csv(txt_file, delimiter=\"\\t\").replace('-', 0)\r\n self.img_dir = img_dir\r\n self.transform = transform\r\n def __len__(self):\r\n return len(self.label_frame)\r\n def __getitem__(self, idx):\r\n imgpath = os.path.join(self.img_dir,\r\n self.label_frame.iloc[idx, 0])\r\n image = pil_loader(imgpath)\r\n\r\n protest = self.label_frame.iloc[idx, 1:2].to_numpy().astype('float')\r\n violence = self.label_frame.iloc[idx, 2:3].to_numpy().astype('float')\r\n visattr = self.label_frame.iloc[idx, 3:].to_numpy().astype('float')\r\n label = {'protest':protest, 'violence':violence, 'visattr':visattr}\r\n\r\n sample = {\"image\":image, \"label\":label}\r\n if self.transform:\r\n sample[\"image\"] = self.transform(sample[\"image\"])\r\n return sample\r\n\r\nclass ProtestDatasetEval(Dataset):\r\n \"\"\"\r\n dataset for just calculating the output (does not need an annotation file)\r\n \"\"\"\r\n def __init__(self, img_dir):\r\n \"\"\"\r\n Args:\r\n img_dir: Directory with images\r\n \"\"\"\r\n self.img_dir = img_dir\r\n self.transform = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n ])\r\n self.img_list = sorted(os.listdir(img_dir))\r\n def __len__(self):\r\n return len(self.img_list)\r\n def __getitem__(self, idx):\r\n imgpath = os.path.join(self.img_dir,\r\n self.img_list[idx])\r\n image = pil_loader(imgpath)\r\n # we need this variable to check if the image is protest or not)\r\n sample = {\"imgpath\":imgpath, \"image\":image}\r\n sample[\"image\"] = self.transform(sample[\"image\"])\r\n return sample\r\n\r\n\r\ndef pil_loader(path):\r\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\r\n with open(path, 'rb') as f:\r\n img = Image.open(f)\r\n return img.convert('RGB')\r\n\r\n\r\ndef paddle_infer_args(parser):\r\n def str2bool(v):\r\n return v.lower() in (\"true\", \"t\", \"1\")\r\n\r\n group1 = parser.add_argument_group('paddle')\r\n # params for prediction engine\r\n group1.add_argument(\"--use_gpu\", type=str2bool, default=True)\r\n group1.add_argument(\"--ir_optim\", type=str2bool, default=True)\r\n group1.add_argument(\"--use_tensorrt\", type=str2bool, default=False)\r\n group1.add_argument(\"--use_fp16\", type=str2bool, default=False)\r\n group1.add_argument(\"--gpu_mem\", type=int, default=500)\r\n\r\n # params for text detector\r\n group1.add_argument(\"--image_dir\", type=str)\r\n group1.add_argument(\"--det_algorithm\", type=str, default='DB')\r\n group1.add_argument(\"--det_model_dir\", type=str)\r\n group1.add_argument(\"--det_limit_side_len\", type=float, default=960)\r\n group1.add_argument(\"--det_limit_type\", type=str, default='max')\r\n\r\n # DB parmas\r\n group1.add_argument(\"--det_db_thresh\", 
type=float, default=0.3)\r\n group1.add_argument(\"--det_db_box_thresh\", type=float, default=0.5)\r\n group1.add_argument(\"--det_db_unclip_ratio\", type=float, default=1.6)\r\n group1.add_argument(\"--max_batch_size\", type=int, default=10)\r\n # EAST parmas\r\n group1.add_argument(\"--det_east_score_thresh\", type=float, default=0.8)\r\n group1.add_argument(\"--det_east_cover_thresh\", type=float, default=0.1)\r\n group1.add_argument(\"--det_east_nms_thresh\", type=float, default=0.2)\r\n\r\n # SAST parmas\r\n group1.add_argument(\"--det_sast_score_thresh\", type=float, default=0.5)\r\n group1.add_argument(\"--det_sast_nms_thresh\", type=float, default=0.2)\r\n group1.add_argument(\"--det_sast_polygon\", type=bool, default=False)\r\n\r\n # params for text recognizer\r\n group1.add_argument(\"--rec_algorithm\", type=str, default='CRNN')\r\n group1.add_argument(\"--rec_model_dir\", type=str)\r\n group1.add_argument(\"--rec_image_shape\", type=str, default=\"3, 32, 320\")\r\n group1.add_argument(\"--rec_char_type\", type=str, default='ch')\r\n group1.add_argument(\"--rec_batch_num\", type=int, default=6)\r\n group1.add_argument(\"--max_text_length\", type=int, default=25)\r\n group1.add_argument(\r\n \"--rec_char_dict_path\",\r\n type=str,\r\n default=\"./ppocr/utils/ppocr_keys_v1.txt\")\r\n group1.add_argument(\"--use_space_char\", type=str2bool, default=True)\r\n group1.add_argument(\r\n \"--vis_font_path\", type=str, default=\"./doc/fonts/simfang.ttf\")\r\n group1.add_argument(\"--drop_score\", type=float, default=0.5)\r\n\r\n # params for text classifier\r\n group1.add_argument(\"--use_angle_cls\", type=str2bool, default=False)\r\n group1.add_argument(\"--cls_model_dir\", type=str)\r\n group1.add_argument(\"--cls_image_shape\", type=str, default=\"3, 48, 192\")\r\n group1.add_argument(\"--label_list\", type=list, default=['0', '180'])\r\n group1.add_argument(\"--cls_batch_num\", type=int, default=6)\r\n group1.add_argument(\"--cls_thresh\", type=float, default=0.9)\r\n\r\n group1.add_argument(\"--enable_mkldnn\", type=str2bool, default=False)\r\n group1.add_argument(\"--use_pdserving\", type=str2bool, default=False)\r\n\r\n return parser\r\n\r\n\r\n\r\ndef protest_train_parser(parser):\r\n group = parser.add_argument_group('protest_train')\r\n group.add_argument(\"--data_dir\",\r\n type=str,\r\n default = \"UCLA-protest\",\r\n help = \"directory path to UCLA-protest\",\r\n )\r\n group.add_argument(\"--cuda\",\r\n action = \"store_true\",\r\n help = \"use cuda?\",\r\n )\r\n group.add_argument(\"--workers\",\r\n type = int,\r\n default = 4,\r\n help = \"number of workers\",\r\n )\r\n group.add_argument(\"--batch_size\",\r\n type = int,\r\n default = 8,\r\n help = \"batch size\",\r\n )\r\n group.add_argument(\"--epochs\",\r\n type = int,\r\n default = 100,\r\n help = \"number of epochs\",\r\n )\r\n group.add_argument(\"--weight_decay\",\r\n type = float,\r\n default = 1e-4,\r\n help = \"weight decay\",\r\n )\r\n group.add_argument(\"--lr\",\r\n type = float,\r\n default = 0.01,\r\n help = \"learning rate\",\r\n )\r\n group.add_argument(\"--momentum\",\r\n type = float,\r\n default = 0.9,\r\n help = \"momentum\",\r\n )\r\n group.add_argument(\"--print_freq\",\r\n type = int,\r\n default = 10,\r\n help = \"print frequency\",\r\n )\r\n group.add_argument('--resume',\r\n default='', type=str, metavar='PATH',\r\n help='path to latest checkpoint (default: none)')\r\n group.add_argument('--change_lr',\r\n action = \"store_true\",\r\n help = \"Use this if you want to \\\r\n change learning rate 
when resuming\")\r\n group.add_argument('--start_epoch', default=0, type=int, metavar='N',\r\n help='manual epoch number (useful on restarts)')\r\n return parser\r\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jellysquider/magenta
[ "0fc8188870f5d1c988b76dae434b21e58362516c", "0fc8188870f5d1c988b76dae434b21e58362516c", "0fc8188870f5d1c988b76dae434b21e58362516c" ]
[ "magenta/pipelines/chord_pipelines_test.py", "magenta/music/chords_lib_test.py", "magenta/music/sequences_lib_test.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for chord_pipelines.\"\"\"\n\n# internal imports\nimport tensorflow as tf\n\nfrom magenta.common import testing_lib as common_testing_lib\nfrom magenta.music import chords_lib\nfrom magenta.music import constants\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\nfrom magenta.pipelines import chord_pipelines\n\nNO_CHORD = constants.NO_CHORD\n\n\nclass ChordPipelinesTest(tf.test.TestCase):\n\n def _unit_transform_test(self, unit, input_instance,\n expected_outputs):\n outputs = unit.transform(input_instance)\n self.assertTrue(isinstance(outputs, list))\n common_testing_lib.assert_set_equality(self, expected_outputs, outputs)\n self.assertEqual(unit.input_type, type(input_instance))\n if outputs:\n self.assertEqual(unit.output_type, type(outputs[0]))\n\n def testChordsExtractor(self):\n quantized_sequence = sequences_lib.QuantizedSequence()\n quantized_sequence.steps_per_quarter = 1\n testing_lib.add_quantized_chords_to_sequence(\n quantized_sequence, [('C', 2), ('Am', 4), ('F', 5)])\n quantized_sequence.total_steps = 8\n expected_events = [[NO_CHORD, NO_CHORD, 'C', 'C', 'Am', 'F', 'F', 'F']]\n expected_chord_progressions = []\n for events_list in expected_events:\n chords = chords_lib.ChordProgression(\n events_list, steps_per_quarter=1, steps_per_bar=4)\n expected_chord_progressions.append(chords)\n unit = chord_pipelines.ChordsExtractor(all_transpositions=False)\n self._unit_transform_test(unit, quantized_sequence,\n expected_chord_progressions)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for chords_lib.\"\"\"\n\n# internal imports\nimport tensorflow as tf\n\nfrom magenta.music import chord_symbols_lib\nfrom magenta.music import chords_lib\nfrom magenta.music import constants\nfrom magenta.music import melodies_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\n\nNO_CHORD = constants.NO_CHORD\n\n\nclass ChordsLibTest(tf.test.TestCase):\n\n def setUp(self):\n self.quantized_sequence = sequences_lib.QuantizedSequence()\n self.quantized_sequence.qpm = 60.0\n self.quantized_sequence.steps_per_quarter = 4\n\n def testTranspose(self):\n # Transpose ChordProgression with basic triads.\n events = ['Cm', 'F', 'B-', 'E-']\n chords = chords_lib.ChordProgression(events)\n chords.transpose(transpose_amount=7)\n expected = ['Gm', 'C', 'F', 'B-']\n self.assertEqual(expected, list(chords))\n\n # Transpose ChordProgression with more complex chords.\n events = ['Esus2', 'B13', 'A7/B', 'F#dim']\n chords = chords_lib.ChordProgression(events)\n chords.transpose(transpose_amount=-2)\n expected = ['Dsus2', 'A13', 'G7/A', 'Edim']\n self.assertEqual(expected, list(chords))\n\n # Transpose ChordProgression containing NO_CHORD.\n events = ['C', 'B-', NO_CHORD, 'F', 'C']\n chords = chords_lib.ChordProgression(events)\n chords.transpose(transpose_amount=4)\n expected = ['E', 'D', NO_CHORD, 'A', 'E']\n self.assertEqual(expected, list(chords))\n\n def testTransposeUnknownChordSymbol(self):\n # Attempt to transpose ChordProgression with unknown chord symbol.\n events = ['Cm', 'G7', 'P#13', 'F']\n chords = chords_lib.ChordProgression(events)\n with self.assertRaises(chord_symbols_lib.ChordSymbolException):\n chords.transpose(transpose_amount=-4)\n\n def testFromQuantizedSequence(self):\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence,\n [('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 14)])\n chords = chords_lib.ChordProgression()\n chords.from_quantized_sequence(\n self.quantized_sequence, start_step=0, end_step=16)\n expected = [NO_CHORD, NO_CHORD, NO_CHORD, NO_CHORD,\n 'Am', 'Am', 'Am', 'Am', 'D7', 'D7', 'D7', 'D7',\n 'G13', 'G13', 'Csus', 'Csus']\n self.assertEqual(expected, list(chords))\n\n def testFromQuantizedSequenceWithinSingleChord(self):\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence, [('F', 0), ('Gm', 8)])\n chords = chords_lib.ChordProgression()\n chords.from_quantized_sequence(\n self.quantized_sequence, start_step=4, end_step=6)\n expected = ['F'] * 2\n self.assertEqual(expected, list(chords))\n\n def testFromQuantizedSequenceWithNoChords(self):\n chords = chords_lib.ChordProgression()\n chords.from_quantized_sequence(\n self.quantized_sequence, start_step=0, end_step=16)\n expected = [NO_CHORD] * 16\n self.assertEqual(expected, list(chords))\n\n def testFromQuantizedSequenceWithCoincidentChords(self):\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence,\n [('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 12)])\n 
chords = chords_lib.ChordProgression()\n with self.assertRaises(chords_lib.CoincidentChordsException):\n chords.from_quantized_sequence(\n self.quantized_sequence, start_step=0, end_step=16)\n\n def testExtractChords(self):\n self.quantized_sequence.steps_per_quarter = 1\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence, [('C', 2), ('G7', 6), ('F', 8)])\n self.quantized_sequence.total_steps = 10\n chord_progressions, _ = chords_lib.extract_chords(self.quantized_sequence)\n expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'F', 'F']]\n self.assertEqual(expected, [list(chords) for chords in chord_progressions])\n\n def testExtractChordsAllTranspositions(self):\n self.quantized_sequence.steps_per_quarter = 1\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence, [('C', 1)])\n self.quantized_sequence.total_steps = 2\n chord_progressions, _ = chords_lib.extract_chords(self.quantized_sequence,\n all_transpositions=True)\n expected = zip([NO_CHORD] * 12, ['G-', 'G', 'A-', 'A', 'B-', 'B',\n 'C', 'D-', 'D', 'E-', 'E', 'F'])\n self.assertEqual(expected, [tuple(chords) for chords in chord_progressions])\n\n def testExtractChordsForMelodies(self):\n self.quantized_sequence.steps_per_quarter = 1\n testing_lib.add_quantized_track_to_sequence(\n self.quantized_sequence, 0,\n [(12, 100, 2, 4), (11, 1, 6, 11)])\n testing_lib.add_quantized_track_to_sequence(\n self.quantized_sequence, 1,\n [(12, 127, 2, 4), (14, 50, 6, 8),\n (50, 100, 33, 37), (52, 100, 34, 37)])\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence,\n [('C', 2), ('G7', 6), ('Cmaj7', 33)])\n melodies, _ = melodies_lib.extract_melodies(\n self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,\n ignore_polyphonic_notes=True)\n chord_progressions, _ = chords_lib.extract_chords_for_melodies(\n self.quantized_sequence, melodies)\n expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C',\n 'G7', 'G7', 'G7', 'G7', 'G7'],\n [NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],\n ['G7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]\n self.assertEqual(expected, [list(chords) for chords in chord_progressions])\n\n def testExtractChordsForMelodiesCoincidentChords(self):\n self.quantized_sequence.steps_per_quarter = 1\n testing_lib.add_quantized_track_to_sequence(\n self.quantized_sequence, 0,\n [(12, 100, 2, 4), (11, 1, 6, 11)])\n testing_lib.add_quantized_track_to_sequence(\n self.quantized_sequence, 1,\n [(12, 127, 2, 4), (14, 50, 6, 8),\n (50, 100, 33, 37), (52, 100, 34, 37)])\n testing_lib.add_quantized_chords_to_sequence(\n self.quantized_sequence,\n [('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])\n melodies, _ = melodies_lib.extract_melodies(\n self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,\n ignore_polyphonic_notes=True)\n chord_progressions, stats = chords_lib.extract_chords_for_melodies(\n self.quantized_sequence, melodies)\n expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],\n ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]\n stats_dict = dict([(stat.name, stat) for stat in stats])\n self.assertIsNone(chord_progressions[0])\n self.assertEqual(expected,\n [list(chords) for chords in chord_progressions[1:]])\n self.assertEqual(stats_dict['coincident_chords'].count, 1)\n\n def testToSequence(self):\n chords = chords_lib.ChordProgression(\n [NO_CHORD, 'C7', 'C7', 'C7', 'C7', 'Am7b5', 'F6', 'F6', NO_CHORD])\n sequence = chords.to_sequence(sequence_start_time=2, qpm=60.0)\n\n self.assertProtoEquals(\n 'ticks_per_quarter: 220 '\n 
'tempos < qpm: 60.0 > '\n 'text_annotations < '\n ' text: \"C7\" time: 2.25 annotation_type: CHORD_SYMBOL '\n '> '\n 'text_annotations < '\n ' text: \"Am7b5\" time: 3.25 annotation_type: CHORD_SYMBOL '\n '> '\n 'text_annotations < '\n ' text: \"F6\" time: 3.5 annotation_type: CHORD_SYMBOL '\n '> '\n 'text_annotations < '\n ' text: \"N.C.\" time: 4.0 annotation_type: CHORD_SYMBOL '\n '> ',\n sequence)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for sequences_lib.\"\"\"\n\nimport copy\n\n# internal imports\nimport tensorflow as tf\n\nfrom magenta.common import testing_lib as common_testing_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\nfrom magenta.protobuf import music_pb2\n\n\nclass SequencesLibTest(tf.test.TestCase):\n\n def setUp(self):\n self.steps_per_quarter = 4\n self.note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n self.expected_quantized_sequence = sequences_lib.QuantizedSequence()\n self.expected_quantized_sequence.qpm = 60.0\n self.expected_quantized_sequence.steps_per_quarter = self.steps_per_quarter\n\n def testExtractSubsequence(self):\n sequence = copy.copy(self.note_sequence)\n testing_lib.add_track_to_sequence(\n sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n expected_subsequence = copy.copy(self.note_sequence)\n testing_lib.add_track_to_sequence(\n expected_subsequence, 0,\n [(40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01)])\n expected_subsequence.total_time = 4.75\n\n subsequence = sequences_lib.extract_subsequence(sequence, 2.5, 4.75)\n self.assertProtoEquals(expected_subsequence, subsequence)\n\n def testEq(self):\n left_hand = sequences_lib.QuantizedSequence()\n left_hand.qpm = 123.0\n left_hand.steps_per_quarter = 7\n left_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(\n numerator=7, denominator=8)\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 0,\n [(12, 100, 0, 40), (11, 100, 1, 2)])\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 2,\n [(55, 100, 4, 6), (14, 120, 4, 10)])\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 3,\n [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)])\n testing_lib.add_quantized_chords_to_sequence(\n left_hand, [('Cmaj7', 1), ('G9', 2)])\n right_hand = sequences_lib.QuantizedSequence()\n right_hand.qpm = 123.0\n right_hand.steps_per_quarter = 7\n right_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(\n numerator=7, denominator=8)\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 0,\n [(11, 100, 1, 2), (12, 100, 0, 40)])\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 2,\n [(14, 120, 4, 10), (55, 100, 4, 6)])\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 3,\n [(0, 101, 
17, 21), (2, 50, 20, 21), (1, 10, 0, 6)])\n testing_lib.add_quantized_chords_to_sequence(\n right_hand, [('G9', 2), ('Cmaj7', 1)])\n self.assertEqual(left_hand, right_hand)\n\n def testNotEq(self):\n left_hand = sequences_lib.QuantizedSequence()\n left_hand.bpm = 123.0\n left_hand.steps_per_beat = 7\n left_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(\n numerator=7, denominator=8)\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 0,\n [(12, 100, 0, 40), (11, 100, 1, 2)])\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 2,\n [(55, 100, 4, 6), (15, 120, 4, 10)])\n testing_lib.add_quantized_track_to_sequence(\n left_hand, 3,\n [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)])\n testing_lib.add_quantized_chords_to_sequence(\n left_hand, [('Cmaj7', 1), ('G9', 2)])\n right_hand = sequences_lib.QuantizedSequence()\n right_hand.bpm = 123.0\n right_hand.steps_per_beat = 7\n right_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(\n numerator=7, denominator=8)\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 0,\n [(11, 100, 1, 2), (12, 100, 0, 40)])\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 2,\n [(14, 120, 4, 10), (55, 100, 4, 6)])\n testing_lib.add_quantized_track_to_sequence(\n right_hand, 3,\n [(0, 101, 17, 21), (2, 50, 20, 21), (1, 10, 0, 6)])\n testing_lib.add_quantized_chords_to_sequence(\n right_hand, [('G9', 2), ('C7', 1)])\n self.assertNotEqual(left_hand, right_hand)\n\n def testFromNoteSequence(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n testing_lib.add_chords_to_sequence(\n self.note_sequence,\n [('B7', 0.22), ('Em9', 4.0)])\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 0,\n [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),\n (55, 120, 16, 17), (52, 99, 19, 20)])\n testing_lib.add_quantized_chords_to_sequence(\n self.expected_quantized_sequence,\n [('B7', 1), ('Em9', 16)])\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(self.expected_quantized_sequence, quantized)\n\n def testFromNoteSequence_TimeSignatureChange(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del self.note_sequence.time_signatures[:]\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Single time signature.\n self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=0)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Multiple time signatures with no change.\n self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=1)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Time signature change.\n self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)\n with self.assertRaises(sequences_lib.MultipleTimeSignatureException):\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testFromNoteSequence_ImplicitTimeSignatureChange(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del 
self.note_sequence.time_signatures[:]\n quantized = sequences_lib.QuantizedSequence()\n\n # No time signature.\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Implicit time signature change.\n self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)\n with self.assertRaises(sequences_lib.MultipleTimeSignatureException):\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testFromNoteSequence_NoImplicitTimeSignatureChangeOutOfOrder(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del self.note_sequence.time_signatures[:]\n quantized = sequences_lib.QuantizedSequence()\n\n # No time signature.\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # No implicit time signature change, but time signatures are added out of\n # order.\n self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)\n self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=0)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testFromNoteSequence_TempoChange(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del self.note_sequence.tempos[:]\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Single tempo.\n self.note_sequence.tempos.add(qpm=60, time=0)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Multiple tempos with no change.\n self.note_sequence.tempos.add(qpm=60, time=1)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Tempo change.\n self.note_sequence.tempos.add(qpm=120, time=2)\n with self.assertRaises(sequences_lib.MultipleTempoException):\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testFromNoteSequence_ImplicitTempoChange(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del self.note_sequence.tempos[:]\n quantized = sequences_lib.QuantizedSequence()\n\n # No tempo.\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # Implicit tempo change.\n self.note_sequence.tempos.add(qpm=60, time=2)\n with self.assertRaises(sequences_lib.MultipleTempoException):\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testFromNoteSequence_NoImplicitTempoChangeOutOfOrder(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n del self.note_sequence.tempos[:]\n quantized = sequences_lib.QuantizedSequence()\n\n # No tempo.\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n # No implicit tempo change, but tempos are added out of order.\n self.note_sequence.tempos.add(qpm=60, time=2)\n self.note_sequence.tempos.add(qpm=60, time=0)\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n def testRounding(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 1,\n [(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55), (40, 100, 0.50, 0.75),\n 
(41, 100, 0.689, 1.18), (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)])\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 1,\n [(12, 100, 0, 1), (11, 100, 1, 2), (40, 100, 2, 3),\n (41, 100, 3, 5), (44, 100, 5, 7), (55, 100, 16, 17)])\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(self.expected_quantized_sequence, quantized)\n\n def testMultiTrack(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])\n testing_lib.add_track_to_sequence(\n self.note_sequence, 3,\n [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])\n testing_lib.add_track_to_sequence(\n self.note_sequence, 7,\n [(12, 100, 1.0, 5.0), (19, 100, 2.0, 4.0), (24, 100, 3.0, 3.5)])\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 0,\n [(12, 100, 4, 16), (19, 100, 4, 12)])\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 3,\n [(12, 100, 4, 16), (19, 100, 8, 20)])\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 7,\n [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(self.expected_quantized_sequence, quantized)\n\n def testStepsPerBar(self):\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(16, quantized.steps_per_bar())\n\n self.note_sequence.time_signatures[0].numerator = 6\n self.note_sequence.time_signatures[0].denominator = 8\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(12.0, quantized.steps_per_bar())\n\n def testFilterDrums(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])\n testing_lib.add_track_to_sequence(\n self.note_sequence, 3,\n [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])\n\n # Make instrument 0 a drum.\n for note in self.note_sequence.notes:\n if note.instrument == 0:\n note.is_drum = True\n\n testing_lib.add_quantized_track_to_sequence(\n self.expected_quantized_sequence, 3,\n [(12, 100, 4, 16), (19, 100, 8, 20)])\n\n quantized = sequences_lib.QuantizedSequence()\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n self.assertEqual(self.expected_quantized_sequence, quantized)\n\n def testDeepcopy(self):\n quantized = sequences_lib.QuantizedSequence()\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)\n\n quantized_copy = copy.deepcopy(quantized)\n self.assertEqual(quantized, quantized_copy)\n\n testing_lib.add_quantized_track_to_sequence(\n quantized, 1,\n [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])\n\n self.assertNotEqual(quantized, quantized_copy)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ], [ "tensorflow.test.main" ], [ "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eur-synclab/bidsification
[ "15bbb1586e5c6bd20371776f66623bedfc99075e" ]
[ "bidsification_script.py" ]
[ "import os\nimport shutil\nimport sys\nimport glob\nfrom pathlib import Path\nimport pandas as pd\n\n# -----------------\n# STEP 0: variables\n# -----------------\n\nroot_dir = '/exports/fsw/Bendlab/SamenUniek'\nraw_sessions = ['MCC_ses01-lab']\nbids_sessions = ['ses-w01lab']\nfile_type = ['3DT1', 'SNAT1', 'SNAT2', 'SNAT3', 'PCG1', 'PCG2', 'PCG3', 'rsfMRI', 'hires', 'B0-map_RS', 'B0-map', 'B0-map', 'B0-map', 'jones30_A', 'jones30_P']\nnew_file_type = ['T1mri', 'bold_SNAT1', 'bold_SNAT2', 'bold_SNAT3', 'bold_PCG1', 'bold_PCG2', 'bold_PCG3', 'bold_rsfmr', 'T2str', 'B0RS', 'Bzero1', 'Bzero2', 'Bzero3', 'DTIap', 'DTIpa', 'unknown_type', 'log']\ncols = ['participant','nr_files'] + new_file_type\nprefix = 'sub-mcc'\nconversion_log_dir = os.path.join(root_dir, 'conversion_logs')\n# Create top-level pseudobids directory\npseudobids_dir = os.path.join(root_dir, 'pseudobids')\nif not os.path.exists(pseudobids_dir):\n os.mkdir(pseudobids_dir)\n\n# --------------------------------\n# STEP 1: Loop through sessions, participants:\n# - rename PAR and REC files (in place)\n# - copy participant files to new pseudobids directory structure\n# --------------------------------\nfor i, session in enumerate(raw_sessions):\n raw_data_dir = os.path.join(root_dir, session)\n print(raw_data_dir)\n\n # Log file\n conversion_log_fn = os.path.join(conversion_log_dir, session + '_conversion_log.csv')\n # If the log file already exists, read contents into dataframe. If not, create dataframe.\n if os.path.isfile(conversion_log_fn):\n df = pd.read_csv(conversion_log_fn)\n else:\n df = pd.DataFrame(columns=cols)\n \n # Read directory names from raw data foler, write to text file\n for p, participant in enumerate(os.listdir(raw_data_dir)):\n\n # Check in log-file if conversion has already been done.\n # If done, skip.\n if participant in df['participant'].tolist():\n print(f\"Participant {participant} already converted to pseudobids. Skipping...\")\n continue\n\n # Access participant_dir, continue if it exists\n participant_dir = os.path.join(raw_data_dir, participant)\n first_b0_found = False\n b0_found = 0\n fsl_found = False\n if os.path.isdir(participant_dir):\n print(f\"{str(p).zfill(3)}: {participant}\")\n\n all_files = [name for name in os.listdir(participant_dir) if os.path.isfile(os.path.join(participant_dir, name))]\n\n new_row = [None] * len(cols)\n new_row[0] = participant\n new_row[1] = len(all_files)\n\n all_codes = [('0' + file[11:-4] if len(file[11:-4]) < 4 else file[11:-4]) for file in all_files] # assumes unique codes\n all_codes_sorted = sorted(all_codes)\n all_codes_sorted = list(dict.fromkeys(all_codes_sorted))\n\n for j, code in enumerate(all_codes_sorted):\n if 'FSL' in code:\n new_row[-2] = code\n continue\n if code[0] == '0':\n code = code[1:]\n fns = glob.glob(os.path.join(participant_dir, '*_' + code + '.PAR'))\n if len(fns) > 1:\n if new_row[-1] is not None:\n new_row[-1] = f\"{new_row[-1]} | WARNING: found {len(fns)} files with pattern {code}.PAR for participant {participant}. Using first one...\"\n else:\n new_row[-1] = f\"WARNING: found {len(fns)} files with pattern {code}.PAR for participant {participant}. Using first one...\"\n print(new_row[-1])\n continue\n elif len(fns) == 0:\n if new_row[-1] is not None:\n new_row[-1] = f\"{new_row[-1]} | ERROR: found NO files with pattern {code}.PAR for participant {participant}. Ignoring this file...\"\n else:\n new_row[-1] = f\"ERROR: found NO files with pattern {code}.PAR for participant {participant}. 
Ignoring this file...\"\n print(new_row[-1])\n continue\n name = fns[0]\n # open and read the protecolline needed for renaming\n with open(name, 'r') as f:\n protocolline = f.readlines()\n \n line = protocolline[13]\n # Find the first value in the file_type list that exists in protocolline 13 (old identifier)\n match = next((x for x in file_type if x in line), False)\n # Find the index in the new_file_type list that corresponds to the match (new identifier)\n if not match:\n if new_row[-1] is not None:\n new_row[-1] = f\"{new_row[-1]} | ERROR: no known file type found in ({code}.PAR) file for participant {participant}. Ignoring this file...\"\n else:\n new_row[-1] = f\"ERROR: no known file type found in ({code}.PAR) file for participant {participant}. Ignoring this file...\"\n continue\n elif match == 'B0-map':\n if not first_b0_found:\n first_b0_found = True\n b0_found = b0_found + 1\n idx = 9 + b0_found\n if new_row[-1] is not None:\n new_row[-1] = f\"{new_row[-1]} | NOTE: B0 map found ({code}.PAR) for participant {participant}.\"\n else:\n new_row[-1] = f\"NOTE: B0 map found ({code}.PAR) for participant {participant}.\"\n print(new_row[-1])\n else:\n idx = file_type.index(match)\n \n new_row[idx+2] = code\n\n # Rename PAR file, if it doesn't already exist\n if new_file_type[idx] in name:\n print('WARNING: renamed file ' + name + ' already exists in the folder! This file will therefore be skipped!')\n else:\n rename = name[:-4] + '_' + new_file_type[idx] + name[-4:]\n os.rename(name, rename)\n # Rename REC file, if it doesn't already exist\n nameREC = name[:-4] + '.REC'\n if os.path.isfile(nameREC):\n renameREC = name[:-4] + '_' + new_file_type[idx] + '.REC'\n # If the renameREC file does not yet exist, proceed with renaming\n if not os.path.isfile(renameREC):\n os.rename(nameREC, renameREC)\n else:\n print('WARNING: file ' + renameREC + ' already exists in the folder! This file will therefore be skipped!')\n else:\n print('ERROR: corresponding REC file not found for: ' + name)\n\n # Create bids-like directory structure for participant\n sub_dir = os.path.join(pseudobids_dir, prefix + str(participant[4:10]))\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n # Session-level directory\n session_dir = os.path.join(sub_dir, str(bids_sessions[i]))\n # Copy renamed raw data to pseudobids directory\n if os.path.exists(session_dir):\n all_files_to_copy = [fn for fn in os.listdir(participant_dir) if os.path.isfile(os.path.join(participant_dir, fn))]\n for file_to_copy in all_files_to_copy:\n if not 'FSL' in file_to_copy:\n shutil.copy2(os.path.join(participant_dir, file_to_copy), session_dir)\n else:\n shutil.copytree(participant_dir, session_dir, ignore=shutil.ignore_patterns('FSL*'))\n\n # Add participant info to log file\n df_new_row = pd.DataFrame([new_row], columns=cols)\n df = df.append(df_new_row, ignore_index=True)\n df.to_csv(conversion_log_fn)\n else:\n print('Error: participant directory not found for ' + participant)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Data-Laboratory/WorkExamples
[ "27e58207e664da7813673e6792c0c30c0a5bf74c", "27e58207e664da7813673e6792c0c30c0a5bf74c", "27e58207e664da7813673e6792c0c30c0a5bf74c", "27e58207e664da7813673e6792c0c30c0a5bf74c", "27e58207e664da7813673e6792c0c30c0a5bf74c" ]
[ "195_xgboost_for_image_classification_using_VGG16.py", "125_126_GAN_predict_mnist.py", "50_K_means_intro.py", "166b_Intro_to_time_series_Forecasting_using_LSTM_and_TimeseriesGenerator.py", "204_train_simple_unet_for_mitochondria.py" ]
[ "#Ref: Sreenivas Sarwar Anik\n\n\"\"\"\n\nIMAGE CLASSIFICATION USING XGBOOST by extracting features using VGG16 imagenet pretrained weights.\n\nThis code explains the process of using XGBoost for image classification\nusing pretrained weights (VGG16) as feature extractors.\n\nCode last tested on: \n Tensorflow: 2.2.0\n Keras: 2.3.1\n Python: 3.7\n\npip install xgboost \n \nXGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1,\n importance_type='gain', interaction_constraints='',\n learning_rate=0.300000012, max_delta_step=0, max_depth=6,\n min_child_weight=1, missing=nan, monotone_constraints='()',\n n_estimators=100, n_jobs=0, num_parallel_tree=1,\n objective='multi:softprob', random_state=0, reg_alpha=0,\n reg_lambda=1, scale_pos_weight=None, subsample=1,\n tree_method='exact', validate_parameters=1, verbosity=None) \n \n\"\"\"\n\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport glob\nimport cv2\n\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nimport os\nimport seaborn as sns\nfrom keras.applications.vgg16 import VGG16\n\n\n# Read input images and assign labels based on folder names\nprint(os.listdir(\"images/classification/\"))\n\nSIZE = 256 #Resize images\n\n#Capture training data and labels into respective lists\ntrain_images = []\ntrain_labels = [] \n\nfor directory_path in glob.glob(\"images/classification/train/*\"):\n label = directory_path.split(\"\\\\\")[-1]\n print(label)\n for img_path in glob.glob(os.path.join(directory_path, \"*.jpg\")):\n print(img_path)\n img = cv2.imread(img_path, cv2.IMREAD_COLOR) \n img = cv2.resize(img, (SIZE, SIZE))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n train_images.append(img)\n train_labels.append(label)\n\n#Convert lists to arrays \ntrain_images = np.array(train_images)\ntrain_labels = np.array(train_labels)\n\n\n# Capture test/validation data and labels into respective lists\n\ntest_images = []\ntest_labels = [] \nfor directory_path in glob.glob(\"images/classification/validation/*\"):\n fruit_label = directory_path.split(\"\\\\\")[-1]\n for img_path in glob.glob(os.path.join(directory_path, \"*.jpg\")):\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (SIZE, SIZE))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n test_images.append(img)\n test_labels.append(fruit_label)\n\n#Convert lists to arrays \ntest_images = np.array(test_images)\ntest_labels = np.array(test_labels)\n\n#Encode labels from text to integers.\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(test_labels)\ntest_labels_encoded = le.transform(test_labels)\nle.fit(train_labels)\ntrain_labels_encoded = le.transform(train_labels)\n\n#Split data into test and train datasets (already split but assigning to meaningful convention)\nx_train, y_train, x_test, y_test = train_images, train_labels_encoded, test_images, test_labels_encoded\n\n###################################################################\n# Normalize pixel values to between 0 and 1\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n#One hot encode y values for neural network. 
\n# from keras.utils import to_categorical\n# y_train_one_hot = to_categorical(y_train)\n# y_test_one_hot = to_categorical(y_test)\n\n#############################\n#Load model wothout classifier/fully connected layers\nVGG_model = VGG16(weights='imagenet', include_top=False, input_shape=(SIZE, SIZE, 3))\n\n#Make loaded layers as non-trainable. This is important as we want to work with pre-trained weights\nfor layer in VGG_model.layers:\n\tlayer.trainable = False\n \nVGG_model.summary() #Trainable parameters will be 0\n\n\n#Now, let us use features from convolutional network for RF\nfeature_extractor=VGG_model.predict(x_train)\n\nfeatures = feature_extractor.reshape(feature_extractor.shape[0], -1)\n\nX_for_training = features #This is our X input to RF\n\n#RANDOM FOREST\n#from sklearn.ensemble import RandomForestClassifier\n#model = RandomForestClassifier(n_estimators = 50, random_state = 42)\n\n# Train the model on training data\n\n\n#XGBOOST\nimport xgboost as xgb\nmodel = xgb.XGBClassifier()\nmodel.fit(X_for_training, y_train) #For sklearn no one hot encoding\n\n#Send test data through same feature extractor process\nX_test_feature = VGG_model.predict(x_test)\nX_test_features = X_test_feature.reshape(X_test_feature.shape[0], -1)\n\n#Now predict using the trained RF model. \nprediction = model.predict(X_test_features)\n#Inverse le transform to get original label back. \nprediction = le.inverse_transform(prediction)\n\n#Print overall accuracy\nfrom sklearn import metrics\nprint (\"Accuracy = \", metrics.accuracy_score(test_labels, prediction))\n\n#Confusion Matrix - verify accuracy of each class\nfrom sklearn.metrics import confusion_matrix\n\ncm = confusion_matrix(test_labels, prediction)\n#print(cm)\nsns.heatmap(cm, annot=True)\n\n#Check results on a few select images\nn=np.random.randint(0, x_test.shape[0])\nimg = x_test[n]\nplt.imshow(img)\ninput_img = np.expand_dims(img, axis=0) #Expand dims so the input is (num images, x, y, c)\ninput_img_feature=VGG_model.predict(input_img)\ninput_img_features=input_img_feature.reshape(input_img_feature.shape[0], -1)\nprediction = model.predict(input_img_features)[0] \nprediction = le.inverse_transform([prediction]) #Reverse the label encoder to original name\nprint(\"The prediction for this image is: \", prediction)\nprint(\"The actual label for this image is: \", test_labels[n])\n\n\n\n\n\n\n\n", "#Ref: Sreenivas Sarwar Anik\n\n\"\"\"\nReferences from the video: \nhttps://www.thispersondoesnotexist.com/\nhttp://www.wisdom.weizmann.ac.il/~vision/courses/2018_2/Advanced_Topics_in_Computer_Vision/files/DomainTransfer.pdf\n\"\"\"\n\n#FOr single image\n# example of generating an image for a specific point in the latent space\nfrom keras.models import load_model\nfrom numpy import asarray\nfrom matplotlib import pyplot\nfrom numpy.random import randn\n\n# load model\nmodel = load_model('generator_model_100K.h5')\n\n#To create same image, suppy same vector each time\n# all 0s\n#vector = asarray([[0. 
for _ in range(100)]]) #Vector of all zeros\n\n#To create random images each time...\nvector = randn(100) #Vector of random numbers (creates a column, need to reshape)\nvector = vector.reshape(1, 100)\n\n# generate image\nX = model.predict(vector)\n\n# plot the result\npyplot.imshow(X[0, :, :, 0], cmap='gray_r')\npyplot.show()\n\n\"\"\"\n#Uncomment to run this part of the code....\n##############################################\n\n# example of loading the generator model and generating images\nfrom keras.models import load_model\nfrom numpy.random import randn\nfrom matplotlib import pyplot as plt\n\n\n# generate points in latent space as input for the generator\ndef generate_latent_points(latent_dim, n_samples):\n\t# generate points in the latent space\n\tx_input = randn(latent_dim * n_samples)\n\t# reshape into a batch of inputs for the network\n\tx_input = x_input.reshape(n_samples, latent_dim)\n\treturn x_input\n\n# create and save a plot of generated images (reversed grayscale)\ndef save_plot(examples, n):\n\t# plot images\n\tfor i in range(n * n):\n\t\t# define subplot\n\t\tplt.subplot(n, n, 1 + i)\n\t\t# turn off axis\n\t\tplt.axis('off')\n\t\t# plot raw pixel data\n\t\tplt.imshow(examples[i, :, :, 0], cmap='gray_r')\n\tplt.show()\n\n# load model\nmodel = load_model('generator_model_100K.h5')\n# generate images\n#Generate 16 images, each image provide a vector of size 100 as input\nlatent_points = generate_latent_points(100, 16) \n# generate images\nX = model.predict(latent_points)\n# plot the result\nsave_plot(X, 4) #Plot 4x4 grid (Change to 5 if generating 25 images)\n\n\"\"\"\n", "#Ref: Sreenivas Sarwar Anik\n\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\ndf=pd.read_excel('other_files/K_Means.xlsx')\nprint(df.head())\n\nimport seaborn as sns\nsns.regplot(x=df['X'], y=df['Y'], fit_reg=False)\n\n\nfrom sklearn.cluster import KMeans\n\n#https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\nkmeans = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=10, random_state=0)\n\nmodel = kmeans.fit(df)\n\npredicted_values = kmeans.predict(df)\n\n\nplt.scatter(df['X'], df['Y'], c=predicted_values, s=50, cmap='viridis')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=200, c='black', alpha=0.5)\nplt.show()\n", "#Ref: Sreenivas Sarwar Anik\n\"\"\"\nDataset from: https://www.kaggle.com/rakannimer/air-passengers\nInternational Airline Passengers prediction problem.\nThis is a problem where, given a year and a month, the task is to predict \nthe number of international airline passengers in units of 1,000. \nThe data ranges from January 1949 to December 1960, or 12 years, with 144 observations.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport math\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, SimpleRNN, LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n# load the dataset\ndataframe = read_csv('data/AirPassengers.csv', usecols=[1])\nplt.plot(dataframe)\n\n#Convert pandas dataframe to numpy array\ndataset = dataframe.values\ndataset = dataset.astype('float32') #COnvert values to float\n\n# Normalization is optional but recommended for neural network as certain \n# activation functions are sensitive to magnitude of numbers. 
\n# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1)) #Also try QuantileTransformer\ndataset = scaler.fit_transform(dataset)\n\n#We cannot use random way of splitting dataset into train and test as\n#the sequence of events is important for time series.\n#So let us take first 60% values for train and the remaining 1/3 for testing\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.66)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\n\n\n#Use TimeseriesGenerator to organize training data into the right format\n#We can use a generator instead......\nfrom keras.preprocessing.sequence import TimeseriesGenerator # Generates batches for sequence data\nseq_size = length = 10 \nbatch_size = 1\ntrain_generator = TimeseriesGenerator(train,train,length=length,batch_size=batch_size)\nprint(\"Total number of samples in the original training data = \", len(train)) # 95\nprint(\"Total number of samples in the generated data = \", len(train_generator)) # 55\n#With length 40 it generated 55 samples, each of length 40 (by using data of length 95)\n\n# print a couple of samples... \nx, y = train_generator[0]\n\n#Also generate validation data\nvalidation_generator = TimeseriesGenerator(test, test, length=length ,batch_size=batch_size)\n\n\n\n#Input dimensions are... (N x seq_size)\nnum_features = 1 #Univariate example\n\n#############################################\n#Check SimpleRNN before moving on to LSTM\n# print('Build SimpleRNN model...')\n# # create and fit pure, simple RNN\n# model = Sequential()\n# model.add(SimpleRNN(64, input_shape=(length, num_features), activation='relu')) #12\n# model.add(Dense(1))\n# model.compile(loss='mean_squared_error', optimizer='adam', metrics = ['acc'])\n# print(model.summary()) \n\n##################################################\n##########################################################\n#LSTM single layer with 50 units\n# model = Sequential()\n\n# model.add(LSTM(50, input_shape=(length, num_features)))\n# model.add(Dense(1))\n# model.compile(optimizer = 'adam', loss='mse')\n# ###############################################\n# ######################################################\n#Stacked LSTM with 1 hidden dense layer\n# reshape input to be [samples, time steps, features]\n#trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n#testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n#\nmodel = Sequential()\nmodel.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(length, num_features)))\nmodel.add(LSTM(50, activation='relu'))\n#model.add(Dense(32))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mean_squared_error')\n\nmodel.summary()\nprint('Train...')\n###############################################\n\n#Bidirectional LSTM\n# reshape input to be [samples, time steps, features]\n#trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n#testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n#\n##For some sequence forecasting problems we may need LSTM to learn\n## sequence in both forward and backward directions\n#from keras.layers import Bidirectional\n#model = Sequential()\n#model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(None, seq_size)))\n#model.add(Dense(1))\n#model.compile(optimizer='adam', loss='mean_squared_error')\n#model.summary()\n#print('Train...')\n\n#########################################################\n#ConvLSTM\n#The layer expects input as a sequence of 
two-dimensional images, \n#therefore the shape of input data must be: [samples, timesteps, rows, columns, features]\n\n# trainX = trainX.reshape((trainX.shape[0], 1, 1, 1, seq_size))\n# testX = testX.reshape((testX.shape[0], 1, 1, 1, seq_size))\n\n# model = Sequential()\n# model.add(ConvLSTM2D(filters=64, kernel_size=(1,1), activation='relu', input_shape=(1, 1, 1, seq_size)))\n# model.add(Flatten())\n# model.add(Dense(32))\n# model.add(Dense(1))\n# model.compile(optimizer='adam', loss='mean_squared_error')\n# model.summary()\n#print('Train...')\n\n\n\n#########################################\nmodel.fit_generator(generator=train_generator, verbose=2, epochs=100, validation_data=validation_generator)\n\n#############################################\ntrainPredict = model.predict(train_generator)\ntestPredict = model.predict(validation_generator)\n\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY_inverse = scaler.inverse_transform(train)\ntestPredict = scaler.inverse_transform(testPredict)\ntestY_inverse = scaler.inverse_transform(test)\n\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY_inverse[length:], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\n\ntestScore = math.sqrt(mean_squared_error(testY_inverse[length:], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting\n#we must shift the predictions so that they align on the x-axis with the original dataset. \ntrainPredictPlot = np.empty_like(dataset)\ntrainPredictPlot[:, :] = np.nan\ntrainPredictPlot[length:len(trainPredict)+length, :] = trainPredict\n\n# shift test predictions for plotting\ntestPredictPlot = np.empty_like(dataset)\ntestPredictPlot[:, :] = np.nan\n#testPredictPlot[len(trainPredict)+(seq_size*2)-1:len(dataset)-1, :] = testPredict\ntestPredictPlot[len(train)+(length)-1:len(dataset)-1, :] = testPredict\n\n\n# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()\n\n\n\n\n\n", "#Ref: Sreenivas Sarwar Anik\n\n\"\"\"\n#Ref: Sreenivas Sarwar Anik\n\nTraining and testing for semantic segmentation (Unet) of mitochondria\nUses standard Unet framework with no tricks!\n\nDataset info: Electron microscopy (EM) dataset from\nhttps://www.epfl.ch/labs/cvlab/data/data-em/\n\nPatches of 256x256 from images and labels \nhave been extracted (via separate program) and saved to disk. \n\n\nThis code uses 256x256 images/masks.\n\nTo annotate images and generate labels, you can use APEER (for free):\nwww.apeer.com \n\"\"\"\n\nfrom simple_unet_model import simple_unet_model #Use normal unet model\nfrom keras.utils import normalize\nimport os\nimport cv2\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nimage_directory = 'data/generated_patches/images/'\nmask_directory = 'data/generated_patches/masks/'\n\n\nSIZE = 256\nimage_dataset = [] #Many ways to handle data, you can use pandas. Here, we are using a list format. \nmask_dataset = [] #Place holders to define add labels. 
We will add 0 to all parasitized images and 1 to uninfected.\n\nimages = os.listdir(image_directory)\nfor i, image_name in enumerate(images): #Remember enumerate method adds a counter and returns the enumerate object\n if (image_name.split('.')[1] == 'tif'):\n #print(image_directory+image_name)\n image = cv2.imread(image_directory+image_name, 0)\n image = Image.fromarray(image)\n image = image.resize((SIZE, SIZE))\n image_dataset.append(np.array(image))\n\n#Iterate through all images in Uninfected folder, resize to 64 x 64\n#Then save into the same numpy array 'dataset' but with label 1\n\nmasks = os.listdir(mask_directory)\nfor i, image_name in enumerate(masks):\n if (image_name.split('.')[1] == 'tif'):\n image = cv2.imread(mask_directory+image_name, 0)\n image = Image.fromarray(image)\n image = image.resize((SIZE, SIZE))\n mask_dataset.append(np.array(image))\n\n\n#Normalize images\nimage_dataset = np.expand_dims(normalize(np.array(image_dataset), axis=1),3)\n#D not normalize masks, just rescale to 0 to 1.\nmask_dataset = np.expand_dims((np.array(mask_dataset)),3) /255.\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size = 0.10, random_state = 0)\n\n#Sanity check, view few mages\nimport random\nimport numpy as np\nimage_number = random.randint(0, len(X_train))\nplt.figure(figsize=(12, 6))\nplt.subplot(121)\nplt.imshow(np.reshape(X_train[image_number], (256, 256)), cmap='gray')\nplt.subplot(122)\nplt.imshow(np.reshape(y_train[image_number], (256, 256)), cmap='gray')\nplt.show()\n\n###############################################################\nIMG_HEIGHT = image_dataset.shape[1]\nIMG_WIDTH = image_dataset.shape[2]\nIMG_CHANNELS = image_dataset.shape[3]\n\ndef get_model():\n return simple_unet_model(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)\n\nmodel = get_model()\n\n\n#If starting with pre-trained weights. 
\n#model.load_weights('mitochondria_gpu_tf1.4.hdf5')\n\nhistory = model.fit(X_train, y_train, \n batch_size = 16, \n verbose=1, \n epochs=1, \n validation_data=(X_test, y_test), \n shuffle=False)\n\nmodel.save('mitochondria_test.hdf5')\n\n############################################################\n#Evaluate the model\n\n\n\t# evaluate model\n_, acc = model.evaluate(X_test, y_test)\nprint(\"Accuracy = \", (acc * 100.0), \"%\")\n\n\n#plot the training and validation accuracy and loss at each epoch\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(loss) + 1)\nplt.plot(epochs, loss, 'y', label='Training loss')\nplt.plot(epochs, val_loss, 'r', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nacc = history.history['acc']\n#acc = history.history['accuracy']\nval_acc = history.history['val_acc']\n#val_acc = history.history['val_accuracy']\n\nplt.plot(epochs, acc, 'y', label='Training acc')\nplt.plot(epochs, val_acc, 'r', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()\n\n##################################\n#IOU\ny_pred=model.predict(X_test)\ny_pred_thresholded = y_pred > 0.5\n\nintersection = np.logical_and(y_test, y_pred_thresholded)\nunion = np.logical_or(y_test, y_pred_thresholded)\niou_score = np.sum(intersection) / np.sum(union)\nprint(\"IoU socre is: \", iou_score)\n\n#######################################################################\n#Predict on a few images\nmodel = get_model()\nmodel.load_weights('mitochondria_50_plus_100_epochs.hdf5') #Trained for 50 epochs and then additional 100\n#model.load_weights('mitochondria_gpu_tf1.4.hdf5') #Trained for 50 epochs\n\ntest_img_number = random.randint(0, len(X_test))\ntest_img = X_test[test_img_number]\nground_truth=y_test[test_img_number]\ntest_img_norm=test_img[:,:,0][:,:,None]\ntest_img_input=np.expand_dims(test_img_norm, 0)\nprediction = (model.predict(test_img_input)[0,:,:,0] > 0.2).astype(np.uint8)\n\ntest_img_other = cv2.imread('data/test_images/02-1_256.tif', 0)\n#test_img_other = cv2.imread('data/test_images/img8.tif', 0)\ntest_img_other_norm = np.expand_dims(normalize(np.array(test_img_other), axis=1),2)\ntest_img_other_norm=test_img_other_norm[:,:,0][:,:,None]\ntest_img_other_input=np.expand_dims(test_img_other_norm, 0)\n\n#Predict and threshold for values above 0.5 probability\n#Change the probability threshold to low value (e.g. 0.05) for watershed demo.\nprediction_other = (model.predict(test_img_other_input)[0,:,:,0] > 0.2).astype(np.uint8)\n\nplt.figure(figsize=(16, 8))\nplt.subplot(231)\nplt.title('Testing Image')\nplt.imshow(test_img[:,:,0], cmap='gray')\nplt.subplot(232)\nplt.title('Testing Label')\nplt.imshow(ground_truth[:,:,0], cmap='gray')\nplt.subplot(233)\nplt.title('Prediction on test image')\nplt.imshow(prediction, cmap='gray')\nplt.subplot(234)\nplt.title('External Image')\nplt.imshow(test_img_other, cmap='gray')\nplt.subplot(235)\nplt.title('Prediction of external Image')\nplt.imshow(prediction_other, cmap='gray')\nplt.show()\n\n#plt.imsave('input.jpg', test_img[:,:,0], cmap='gray')\n#plt.imsave('data/results/output2.jpg', prediction_other, cmap='gray')\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.expand_dims", "sklearn.metrics.accuracy_score", "sklearn.metrics.confusion_matrix", "numpy.array", "sklearn.preprocessing.LabelEncoder", "numpy.random.randint" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.random.randn" ], [ "matplotlib.pyplot.scatter", "pandas.read_excel", "matplotlib.pyplot.show", "sklearn.cluster.KMeans" ], [ "pandas.read_csv", "numpy.empty_like", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "numpy.expand_dims", "matplotlib.pyplot.title", "numpy.logical_and", "numpy.reshape", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.plot", "numpy.logical_or", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Julian-Hochhaus/py_modules
[ "cf3fa9b1193a0022ffcd7d3b15979ac19936da61" ]
[ "modules/tab2latex/tab2latex/textable.py" ]
[ "import uncertainties\nimport numpy as np\nimport os\nfrom uncertainties import ufloat\n\n#######\n#private functions\n#######\ndef __write_seperator_or_newline(data,texfile,j):\n if(j==(len(data)-1)):\n texfile.write(\"\\\\\")\n else:\n texfile.write(\"&\")\n#*********\n#Write tables body\n#*********\n\ndef __writedata(data,names,texfile,caption,label, dec_points,len_data_max):\n for i in range(len_data_max):\n texfile.write(\" \")\n #writing all data except the last columns\n for j in range(len(data)):\n if isinstance(data[j][i],uncertainties.core.Variable) or (isinstance(data[j][i],uncertainties.core.AffineScalarFunc)):\n if(str(data[j][i])==\"0.0+/-0\"):\n texfile.write(r'{'+r'$\\num{'+'0'+'}$'+r'}')\n __write_seperator_or_newline(data,texfile,j);\n else:\n texfile.write(r'{'+r'${:L}$'.format(data[j][i])+r'}')\n __write_seperator_or_newline(data,texfile,j);\n else:\n if(data[j][i]=='-'):\n texfile.write(r'{'+r\"$\\\\text{\\\\textbf{---}}$\"+r'}')\n __write_seperator_or_newline(data,texfile,j);\n else:\n val=ufloat(data[j][i],0)\n val_str=r'{:L}'.format(val)\n if('(' in val_str):\n val,exp=val_str.split(r'\\pm')\n br,val=val.split('(')\n val=str(round(float(val),dec_points[j]))\n br,exp=exp.split(')')\n texfile.write(r'{$'+val+exp+r'$}')\n else:\n val=str(round(val.n,dec_points[j]))\n texfile.write(r'{$'+val+r'$}')\n\n __write_seperator_or_newline(data,texfile,j);\n texfile.write(\" \\\\\\\\\\n\")\n\n#*********\n#write dashes to shorter arrays\n#*********\n\ndef __append_dashes_to_short_arrays(data, len_data_max):\n for i in range(len(data)):\n if not len_data_max==len(data[i]):\n for j in range(len_data_max-len(data[i])):\n data[i]=np.append(data[i],'-')\n\n#*********\n#returns max lenght of data arrays\n#*********\n\ndef __max_array_lenght(data):\n len_data_arr=[]\n for i in range(len(data)):\n len_data_arr.append(len(data[i]))\n len_data_max=max(len_data_arr)\n return len_data_max\n\n#*********\n#write column heads\n#*********\n\ndef __column_names(names,texfile):\n for i in range(len(names)-1):\n texfile.write(\"{\"+names[i]+\"}& \")\n texfile.write(\"{\"+names[len(names)-1]+\"}\\\\\\ \\n\")\n\n#*********\n#write format string\n#*********\n\ndef __write_format_string(data,format_string,texfile):\n if format_string==None:\n for col in data:\n texfile.write(\"S\")\n else:\n texfile.write(format_string)\n texfile.write(\"}\\n\");\n\n###############\n#*********\n#Error function\n#*********\n\ndef __test_error(data,dec_points,names):\n for i in range(len(data)-1):\n if not(len(names)==len(data) and len(dec_points)==len(data)):\n raise TypeError(\"data and names and dec_points must have same dimension! 
\"+\"len(data)= \"+str(len(data))+\"; len(names)= \"+str(len(names)) +\"; len(dec_points)= \"+str(len(dec_points)))\n\n#######\n#private functions\n########\n#*******\n######\n\n\ndef long_tab(data=[[1,2,3],[42,42,42]],names=[\"col1\",\"col2\"],filename=\"test.tex\",caption=\"Caption\",label=\"test\", dec_points=[0,2],format_string=None):\n try: #test, if names and data and dec_points array have different lenghts\n __test_error(data,dec_points,names)\n except TypeError: raise\n else:\n data=np.copy(data)\n #appends em dashs to shorter data arrays\n len_data_max=__max_array_lenght(data)\n __append_dashes_to_short_arrays(data,len_data_max)\n #start writing table\n texfile = open(filename,\"w\")\n texfile.write(\" \\\\begin{longtable}{\")\n __write_format_string(data,format_string,texfile);\n texfile.write(\" \\\\caption{\"+caption+\"}\\n\");\n texfile.write(\"\\\\sisetup{detect-all, parse-numbers=false}\\n\")#setup columnwidth to fit best\n texfile.write(\" \\\\label{tab:\"+label+\"}\\\\\\ \\n\")\n texfile.write(\" \\\\toprule\"+\"\\n\");\n __column_names(names,texfile)\n texfile.write(\"\\\\midrule\\n\");\n texfile.write(\"\\\\endfirsthead\"+\"\\n\")\n texfile.write(\" \\\\toprule\"+\"\\n\");\n #writing column titles\n __column_names(names,texfile)\n texfile.write(\"\\\\midrule\\n\");\n texfile.write(\"\\\\endhead\"+\"\\n\")\n texfile.write(\"\\\\midrule\\n\");\n texfile.write(\"\\\\endfoot\")\n texfile.write(\" \\\\bottomrule\\n\");\n texfile.write(\"\\\\endlastfoot\");\n __writedata(data,names,texfile,caption,label, dec_points,len_data_max)\n texfile.write(\" \\\\end{longtable}\\n\");\n texfile.close()\n############################################\n####\ndef test():\n\tprint(\"test\")\n\n####\n############################################\n\ndef latex_tab(data=[[1,2,3],[42,42,42]],names=[\"col1\",\"col2\"],filename=\"test.tex\",caption=\"Caption\",label=\"test\", dec_points=[0,2], format_string=None):\n try: #test, if names and data and dec_points array have different lenghts\n __test_error(data,dec_points,names)\n except TypeError: raise\n else:\n data=np.copy(data)\n #appends em dashs to shorter data arrays\n len_data_max=__max_array_lenght(data)\n __append_dashes_to_short_arrays(data,len_data_max)\n #start writing table\n texfile = open(filename,\"w\")\n texfile.write(\"\\\\begin{table}\\n\");\n texfile.write(\" \\\\caption{\"+caption+\"}\\n\");\n texfile.write(\" \\\\label{tab:\"+label+\"}\\n\")\n texfile.write(\" \\\\centering\\n\")\n texfile.write(\"\\\\sisetup{detect-all, parse-numbers=false}\\n\")#setup columnwidth to fit best\n texfile.write(\" \\\\begin{tabular}{\")\n __write_format_string(data,format_string,texfile)\n texfile.write(\" \\\\toprule \\n \");\n #writing column titles\n __column_names(names,texfile)\n texfile.write(\" \\\\midrule\\n\");\n #writing data\n __writedata(data,names,texfile,caption,label, dec_points,len_data_max)\n texfile.write(\" \\\\bottomrule\\n\");\n texfile.write(\" \\\\end{tabular}\\n\");\n texfile.write(\"\\\\end{table}\");\n texfile.close()\n\n#if module is executed as script:\nif __name__ == \"__main__\":\n arr1=[1e-30,0.0003,4]\n arr2=[0.8e-3,12234783573e12,234,42,1.2800000200]\n import textable\n print(\"42 is the answer to life the universe and everything!\")\n print(\"Running this module as script generates a sample table.tex\")\n latex_tab([arr1,arr2],[\"col1\",\"col2\"],\"table.tex\",'caption','sample')\n" ]
[ [ "numpy.copy", "numpy.append" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sophiaas/e3nn
[ "92351b9225df7aeaf70fdc124c7b0e566d4c0eda" ]
[ "examples/animated_rsh.py" ]
[ "# pylint: disable=not-callable, no-member, invalid-name, missing-docstring, line-too-long\nimport math\nimport os\nimport subprocess\nimport argparse\nimport shutil\nimport tqdm\n\nimport plotly.graph_objs as go\nimport torch\n\nfrom e3nn import o3, rsh\n\n\ndef rsh_surface(l, m, scale, tr, rot):\n n = 50\n a = torch.linspace(0, 2 * math.pi, 2 * n)\n b = torch.linspace(0, math.pi, n)\n a, b = torch.meshgrid(a, b)\n\n f = rsh.spherical_harmonics_alpha_beta([l], a, b)\n f = torch.einsum('ij,...j->...i', o3.irr_repr(l, *rot), f)\n f = f[..., l + m]\n\n r = o3.angles_to_xyz(a, b)\n x, y, z = r[:, :, 0], r[:, :, 1], r[:, :, 2]\n\n r = f.abs()\n x = scale * r * x + tr[0]\n y = scale * r * y + tr[1]\n z = scale * r * z + tr[2]\n\n max_value = 0.5\n\n return go.Surface(\n x=x.numpy(),\n y=y.numpy(),\n z=z.numpy(),\n surfacecolor=f.numpy(),\n showscale=False,\n cmin=-max_value,\n cmax=max_value,\n colorscale=[[0, 'rgb(0,50,255)'], [0.5, 'rgb(200,200,200)'], [1, 'rgb(255,50,0)']],\n )\n\n\ndef main(lmax, resolution, steps):\n scale = 0.5 * math.sqrt(4 * math.pi) / math.sqrt(2 * lmax + 1)\n\n axis = dict(\n showbackground=False,\n showticklabels=False,\n showgrid=False,\n zeroline=False,\n title='',\n nticks=3,\n range=[-lmax / 2 - 0.5, lmax / 2 + 0.5]\n )\n\n layout = dict(\n width=resolution,\n height=resolution,\n scene=dict(\n xaxis=axis,\n yaxis=axis,\n zaxis=axis,\n aspectmode='manual',\n aspectratio=dict(x=1, y=1, z=1),\n camera=dict(\n up=dict(x=0, y=0, z=1),\n center=dict(x=0, y=0, z=0),\n eye=dict(x=0, y=-1.3, z=0),\n projection=dict(type='perspective'),\n ),\n ),\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n margin=dict(l=0, r=0, t=0, b=0)\n )\n\n if os.path.exists('sh'):\n shutil.rmtree('sh')\n os.makedirs('sh')\n\n for i in tqdm.tqdm(range(steps)):\n rot = 2 * math.pi * i / steps\n a, b, c = 0, math.pi / 4, 0\n abc = o3.compose(-c, -b, -a, *o3.compose(0, 0, rot, a, b, c))\n\n surfaces = [\n rsh_surface(l, m, scale, [l + (m if m < 0 else 0) - lmax / 2, 0, lmax / 2 - l + (m if m > 0 else 0)], abc)\n for l in range(lmax + 1)\n for m in range(-l, l + 1)\n ]\n\n fig = go.Figure(surfaces, layout=layout)\n fig.write_image('sh/{:03d}.png'.format(i))\n\n subprocess.check_output([\"convert\", \"-delay\", \"3\", \"-loop\", \"0\", \"-dispose\", \"2\", \"sh/*.png\", \"output.gif\"])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--lmax\", type=int, default=2)\n parser.add_argument(\"--resolution\", type=int, default=500)\n parser.add_argument(\"--steps\", type=int, default=30)\n\n args = parser.parse_args()\n\n main(args.lmax, args.resolution, args.steps)\n" ]
[ [ "torch.linspace", "torch.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
harrylin-hyl/CenterNet-better
[ "567d4370ddf2b52386b46321dbd9b77e7e9c8b8b", "567d4370ddf2b52386b46321dbd9b77e7e9c8b8b", "567d4370ddf2b52386b46321dbd9b77e7e9c8b8b" ]
[ "dl_lib/network/centernet.py", "dl_lib/data/transforms/transform.py", "dl_lib/network/generator/centernet_decode.py" ]
[ "import math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom dl_lib.layers import ShapeSpec\nfrom dl_lib.structures import Boxes, ImageList, Instances\n\nfrom .generator import CenterNetDecoder, CenterNetGT\nfrom .loss import modified_focal_loss, reg_l1_loss\n\n\nclass CenterNet(nn.Module):\n \"\"\"\n Implement CenterNet (https://arxiv.org/abs/1904.07850).\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.cfg = cfg\n\n # fmt: off\n self.num_classes = cfg.MODEL.CENTERNET.NUM_CLASSES\n # Loss parameters:\n # Inference parameters:\n self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE\n # fmt: on\n self.backbone = cfg.build_backbone(\n cfg, input_shape=ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\n )\n self.upsample = cfg.build_upsample_layers(cfg)\n self.head = cfg.build_head(cfg)\n # self.cls_head = cfg.build_cls_head(cfg)\n # self.wh_head = cfg.build_width_height_head(cfg)\n # self.reg_head = cfg.build_center_reg_head(cfg)\n\n # backbone_shape = self.backbone.output_shape()\n # feature_shapes = [backbone_shape[f] for f in self.in_features]\n\n self.mean, self.std = cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD\n pixel_mean = torch.Tensor(self.mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(self.std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs(list): batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances: Instances\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n Returns:\n dict[str: Tensor]:\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n\n if not self.training:\n return self.inference(images)\n\n features = self.backbone(images.tensor)\n up_fmap = self.upsample(features)\n pred_dict = self.head(up_fmap)\n\n gt_dict = self.get_ground_truth(batched_inputs)\n\n return self.losses(pred_dict, gt_dict)\n\n def losses(self, pred_dict, gt_dict):\n r\"\"\"\n calculate losses of pred and gt\n\n Args:\n gt_dict(dict): a dict contains all information of gt\n gt_dict = {\n \"score_map\": gt scoremap,\n \"wh\": gt width and height of boxes,\n \"reg\": gt regression of box center point,\n \"reg_mask\": mask of regression,\n \"index\": gt index,\n }\n pred(dict): a dict contains all information of prediction\n pred = {\n \"cls\": predicted score map\n \"reg\": predcited regression\n \"wh\": predicted width and height of box\n }\n \"\"\"\n # scoremap loss\n pred_score = pred_dict['cls']\n cur_device = pred_score.device\n for k in gt_dict:\n gt_dict[k] = gt_dict[k].to(cur_device)\n\n loss_cls = modified_focal_loss(pred_score, gt_dict['score_map'])\n\n mask = gt_dict['reg_mask']\n index = gt_dict['index']\n index = index.to(torch.long)\n # width and height loss, better version\n loss_wh = reg_l1_loss(pred_dict['wh'], mask, index, gt_dict['wh'])\n\n # regression loss\n loss_reg = reg_l1_loss(pred_dict['reg'], mask, index, gt_dict['reg'])\n\n loss_cls *= self.cfg.MODEL.LOSS.CLS_WEIGHT\n loss_wh *= self.cfg.MODEL.LOSS.WH_WEIGHT\n loss_reg *= self.cfg.MODEL.LOSS.REG_WEIGHT\n\n loss = {\n \"loss_cls\": loss_cls,\n \"loss_box_wh\": loss_wh,\n 
\"loss_center_reg\": loss_reg,\n }\n # print(loss)\n return loss\n\n @torch.no_grad()\n def get_ground_truth(self, batched_inputs):\n return CenterNetGT.generate(self.cfg, batched_inputs)\n\n @torch.no_grad()\n def inference(self, images):\n \"\"\"\n image(tensor): ImageList in dl_lib.structures\n \"\"\"\n n, c, h, w = images.tensor.shape\n new_h, new_w = (h | 31) + 1, (w | 31) + 1\n center_wh = np.array([w // 2, h // 2], dtype=np.float32)\n size_wh = np.array([new_w, new_h], dtype=np.float32)\n down_scale = self.cfg.MODEL.CENTERNET.DOWN_SCALE\n img_info = dict(center=center_wh, size=size_wh,\n height=new_h // down_scale,\n width=new_w // down_scale)\n\n pad_value = [-x / y for x, y in zip(self.mean, self.std)]\n aligned_img = torch.Tensor(pad_value).reshape((1, -1, 1, 1)).expand(n, c, new_h, new_w)\n aligned_img = aligned_img.to(images.tensor.device)\n\n pad_w, pad_h = math.ceil((new_w - w) / 2), math.ceil((new_h - h) / 2)\n aligned_img[..., pad_h:h + pad_h, pad_w:w + pad_w] = images.tensor\n\n features = self.backbone(aligned_img)\n up_fmap = self.upsample(features)\n pred_dict = self.head(up_fmap)\n results = self.decode_prediction(pred_dict, img_info)\n\n ori_w, ori_h = img_info['center'] * 2\n det_instance = Instances((int(ori_h), int(ori_w)), **results)\n\n return [{\"instances\": det_instance}]\n\n def decode_prediction(self, pred_dict, img_info):\n \"\"\"\n Args:\n pred_dict(dict): a dict contains all information of prediction\n img_info(dict): a dict contains needed information of origin image\n \"\"\"\n fmap = pred_dict[\"cls\"]\n reg = pred_dict[\"reg\"]\n wh = pred_dict[\"wh\"]\n\n boxes, scores, classes = CenterNetDecoder.decode(fmap, wh, reg)\n # boxes = Boxes(boxes.reshape(boxes.shape[-2:]))\n scores = scores.reshape(-1)\n classes = classes.reshape(-1).to(torch.int64)\n\n # dets = CenterNetDecoder.decode(fmap, wh, reg)\n boxes = CenterNetDecoder.transform_boxes(boxes, img_info)\n boxes = Boxes(boxes)\n return dict(pred_boxes=boxes, scores=scores, pred_classes=classes)\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(img / 255) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images\n", "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n# File: transform.py\n\nimport numpy as np\nfrom PIL import Image\n\nfrom .extend_transform import HFlipTransform, NoOpTransform, Transform\n\n__all__ = [\"ExtentTransform\", \"ResizeTransform\"]\n\n\nclass ExtentTransform(Transform):\n \"\"\"\n Extracts a subregion from the source image and scales it to the output size.\n\n The fill color is used to map pixels from the source rect that fall outside\n the source image.\n\n See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform\n \"\"\"\n\n def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):\n \"\"\"\n Args:\n src_rect (x0, y0, x1, y1): src coordinates\n output_size (h, w): dst image size\n interp: PIL interpolation methods\n fill: Fill color used when src_rect extends outside image\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img, interp=None):\n h, w = self.output_size\n ret = Image.fromarray(img).transform(\n size=(w, h),\n method=Image.EXTENT,\n data=self.src_rect,\n resample=interp if interp else self.interp,\n fill=self.fill,\n )\n return np.asarray(ret)\n\n def apply_coords(self, coords):\n # Transform image center from source coordinates into output coordinates\n # and then map the new origin to the corner of the output image.\n h, w = self.output_size\n x0, y0, x1, y1 = self.src_rect\n new_coords = coords.astype(np.float32)\n new_coords[:, 0] -= 0.5 * (x0 + x1)\n new_coords[:, 1] -= 0.5 * (y0 + y1)\n new_coords[:, 0] *= w / (x1 - x0)\n new_coords[:, 1] *= h / (y1 - y0)\n new_coords[:, 0] += 0.5 * w\n new_coords[:, 1] += 0.5 * h\n return new_coords\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=Image.NEAREST)\n return segmentation\n\n\nclass ResizeTransform(Transform):\n \"\"\"\n Resize the image to a target size.\n \"\"\"\n\n def __init__(self, h, w, new_h, new_w, interp):\n \"\"\"\n Args:\n h, w (int): original image size\n new_h, new_w (int): new image size\n interp: PIL interpolation methods\n \"\"\"\n # TODO decide on PIL vs opencv\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img, interp=None):\n assert img.shape[:2] == (self.h, self.w)\n pil_image = Image.fromarray(img)\n interp_method = interp if interp is not None else self.interp\n pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)\n ret = np.asarray(pil_image)\n return ret\n\n def apply_coords(self, coords):\n coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)\n coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)\n return coords\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=Image.NEAREST)\n return segmentation\n\n\ndef HFlip_rotated_box(transform, rotated_boxes):\n \"\"\"\n Apply the horizontal flip transform on rotated boxes.\n\n Args:\n rotated_boxes (ndarray): Nx5 floating point array of\n (x_center, y_center, width, height, angle_degrees) format\n in absolute coordinates.\n \"\"\"\n # Transform x_center\n rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]\n # Transform angle\n rotated_boxes[:, 4] = -rotated_boxes[:, 4]\n return rotated_boxes\n\n\ndef Resize_rotated_box(transform, rotated_boxes):\n \"\"\"\n Apply the resizing transform on rotated boxes. 
For details of how these (approximation)\n formulas are derived, please refer to :meth:`RotatedBoxes.scale`.\n\n Args:\n rotated_boxes (ndarray): Nx5 floating point array of\n (x_center, y_center, width, height, angle_degrees) format\n in absolute coordinates.\n \"\"\"\n scale_factor_x = transform.new_w * 1.0 / transform.w\n scale_factor_y = transform.new_h * 1.0 / transform.h\n rotated_boxes[:, 0] *= scale_factor_x\n rotated_boxes[:, 1] *= scale_factor_y\n theta = rotated_boxes[:, 4] * np.pi / 180.0\n c = np.cos(theta)\n s = np.sin(theta)\n rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))\n rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))\n rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi\n\n return rotated_boxes\n\n\nHFlipTransform.register_type(\"rotated_box\", HFlip_rotated_box)\nNoOpTransform.register_type(\"rotated_box\", lambda t, x: x)\nResizeTransform.register_type(\"rotated_box\", Resize_rotated_box)\n", "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# author: [email protected]\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom dl_lib.data.transforms.transform_gen import CenterAffine\nfrom dl_lib.nn_utils.feature_utils import gather_feature\n\n\nclass CenterNetDecoder(object):\n\n @staticmethod\n def decode(fmap, wh, reg=None, cat_spec_wh=False, K=100):\n r\"\"\"\n decode output feature map to detection results\n\n Args:\n fmap(Tensor): output feature map\n wh(Tensor): tensor that represents predicted width-height\n reg(Tensor): tensor that represens regression of center points\n cat_spec_wh(bool): whether apply gather on tensor `wh` or not\n K(int): topk value\n \"\"\"\n batch, channel, height, width = fmap.shape\n\n fmap = CenterNetDecoder.pseudo_nms(fmap)\n\n scores, index, clses, ys, xs = CenterNetDecoder.topk_score(fmap, K=K)\n if reg is not None:\n reg = gather_feature(reg, index, use_transform=True)\n reg = reg.reshape(batch, K, 2)\n xs = xs.view(batch, K, 1) + reg[:, :, 0:1]\n ys = ys.view(batch, K, 1) + reg[:, :, 1:2]\n else:\n xs = xs.view(batch, K, 1) + 0.5\n ys = ys.view(batch, K, 1) + 0.5\n wh = gather_feature(wh, index, use_transform=True)\n\n if cat_spec_wh:\n wh = wh.view(batch, K, channel, 2)\n clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()\n wh = wh.gather(2, clses_ind).reshape(batch, K, 2)\n else:\n wh = wh.reshape(batch, K, 2)\n\n clses = clses.reshape(batch, K, 1).float()\n scores = scores.reshape(batch, K, 1)\n\n half_w, half_h = wh[..., 0:1] / 2, wh[..., 1:2] / 2\n bboxes = torch.cat([xs - half_w, ys - half_h,\n xs + half_w, ys + half_h],\n dim=2)\n\n detections = (bboxes, scores, clses)\n\n return detections\n\n @staticmethod\n def transform_boxes(boxes, img_info, scale=1):\n r\"\"\"\n transform predicted boxes to target boxes\n\n Args:\n boxes(Tensor): torch Tensor with (Batch, N, 4) shape\n img_info(dict): dict contains all information of original image\n scale(float): used for multiscale testing\n \"\"\"\n boxes = boxes.cpu().numpy().reshape(-1, 4)\n\n center = img_info['center']\n size = img_info['size']\n output_size = (img_info['width'], img_info['height'])\n src, dst = CenterAffine.generate_src_and_dst(center, size, output_size)\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n\n coords = boxes.reshape(-1, 2)\n aug_coords = np.column_stack((coords, np.ones(coords.shape[0])))\n target_boxes = np.dot(aug_coords, trans.T).reshape(-1, 4)\n return 
target_boxes\n\n @staticmethod\n def pseudo_nms(fmap, pool_size=3):\n r\"\"\"\n apply max pooling to get the same effect of nms\n\n Args:\n fmap(Tensor): output tensor of previous step\n pool_size(int): size of max-pooling\n \"\"\"\n pad = (pool_size - 1) // 2\n fmap_max = F.max_pool2d(fmap, pool_size, stride=1, padding=pad)\n keep = (fmap_max == fmap).float()\n return fmap * keep\n\n @staticmethod\n def topk_score(scores, K=40):\n \"\"\"\n get top K point in score map\n \"\"\"\n batch, channel, height, width = scores.shape\n\n # get topk score and its index in every H x W(channel dim) feature map\n topk_scores, topk_inds = torch.topk(scores.reshape(batch, channel, -1), K)\n\n topk_inds = topk_inds % (height * width)\n topk_ys = (topk_inds / width).int().float()\n topk_xs = (topk_inds % width).int().float()\n\n # get all topk in in a batch\n topk_score, index = torch.topk(topk_scores.reshape(batch, -1), K)\n # div by K because index is grouped by K(C x K shape)\n topk_clses = (index / K).int()\n topk_inds = gather_feature(topk_inds.view(batch, -1, 1), index).reshape(batch, K)\n topk_ys = gather_feature(topk_ys.reshape(batch, -1, 1), index).reshape(batch, K)\n topk_xs = gather_feature(topk_xs.reshape(batch, -1, 1), index).reshape(batch, K)\n\n return topk_score, topk_inds, topk_clses, topk_ys, topk_xs\n" ]
[ [ "torch.device", "numpy.array", "torch.no_grad", "torch.Tensor" ], [ "numpy.square", "numpy.asarray", "numpy.cos", "numpy.sin", "numpy.arctan2" ], [ "numpy.dot", "torch.cat", "numpy.ones", "numpy.float32", "torch.nn.functional.max_pool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Brant-Skywalker/PFN
[ "a9966724e0500854fa7630f968c497f8bc19bbe9" ]
[ "PFN-nested/utils/metrics.py" ]
[ "import torch\r\nimport torch.nn as nn\r\n\r\nclass micro():\r\n def __init__(self, rel2idx, ner2idx):\r\n self.rel2idx = rel2idx\r\n self.ner2idx = ner2idx\r\n\r\n\r\n def get_ner_index(self, tensor):\r\n index = (tensor == 1).nonzero(as_tuple=False)\r\n index_scalar = []\r\n for index_tup in index:\r\n scalar = []\r\n for i in index_tup:\r\n scalar.append(i.item())\r\n index_scalar.append(tuple(scalar))\r\n return index_scalar\r\n\r\n def get_re_index(self, tensor):\r\n index = (tensor == 1).nonzero(as_tuple=False)\r\n index_list = []\r\n for index_tup in index:\r\n for i in index_tup:\r\n index_list.append(i.item())\r\n return index_list\r\n\r\n def get_trip(self, ner_pred, re_head_pred, re_tail_pred):\r\n seq_len = ner_pred.size(0)\r\n relation = len(self.rel2idx)\r\n\r\n\r\n re_head_pred = re_head_pred.view(seq_len * seq_len, relation)\r\n re_tail_pred = re_tail_pred.view(seq_len * seq_len, relation)\r\n\r\n ner_pred = torch.sum(ner_pred, dim=-1)\r\n ner_pred = torch.where(ner_pred > 0, torch.ones_like(ner_pred), torch.zeros_like(ner_pred))\r\n\r\n ner_pred_index = self.get_ner_index(ner_pred)\r\n ner_map = {} # head to [(head,tail1),(head,tail2)]\r\n for tup in ner_pred_index:\r\n if tup[0] not in ner_map:\r\n ner_map[tup[0]] = [tup]\r\n else:\r\n ner_map[tup[0]].append(tup)\r\n\r\n\r\n full_trip = []\r\n\r\n for r in range(relation):\r\n re_head_pred_index = self.get_re_index(re_head_pred[:, r])\r\n re_tail_pred_index = self.get_re_index(re_tail_pred[:, r])\r\n\r\n for i in range(seq_len*seq_len):\r\n if i in re_head_pred_index:\r\n subj_head = int(i // seq_len)\r\n obj_head = int(i % seq_len)\r\n if subj_head not in ner_map.keys() or obj_head not in ner_map.keys():\r\n continue\r\n\r\n subjects = ner_map[subj_head]\r\n objects = ner_map[obj_head]\r\n\r\n for s in subjects:\r\n for o in objects:\r\n posit = s[1] * seq_len + o[1]\r\n if posit in re_tail_pred_index:\r\n full_trip.append([s, r, o])\r\n\r\n return full_trip\r\n\r\n\r\n\r\n def count_num(self, ner_pred, ner_label, re_pred_head, re_pred_tail, re_label_head, re_label_tail):\r\n ner_pred = torch.where(ner_pred>=0.5, torch.ones_like(ner_pred),\r\n torch.zeros_like(ner_pred))\r\n re_pred_head = torch.where(re_pred_head>=0.5, torch.ones_like(re_pred_head),\r\n torch.zeros_like(re_pred_head))\r\n re_pred_tail = torch.where(re_pred_tail>=0.5, torch.ones_like(re_pred_tail),\r\n torch.zeros_like(re_pred_tail))\r\n\r\n\r\n batch = ner_pred.size(2)\r\n pred_num, gold_num, right_num = 0, 0, 0\r\n for i in range(batch):\r\n ner_pred_batch = ner_pred[:, :, i, :]\r\n ner_label_batch = ner_label[:, :, i, :]\r\n\r\n re_label_head_batch = re_label_head[:,:,i,:]\r\n re_label_tail_batch = re_label_tail[:,:,i,:]\r\n re_label_set = self.get_trip(ner_label_batch, re_label_head_batch, re_label_tail_batch)\r\n\r\n re_pred_head_batch = re_pred_head[:,:,i,:]\r\n re_pred_tail_batch = re_pred_tail[:,:,i,:]\r\n re_pred_set = self.get_trip(ner_pred_batch, re_pred_head_batch, re_pred_tail_batch)\r\n\r\n\r\n pred_num += len(re_pred_set)\r\n gold_num += len(re_label_set)\r\n\r\n re_right = [trip for trip in re_pred_set if trip in re_label_set]\r\n\r\n ner_right_batch = ner_pred_batch * ner_label_batch\r\n ner_right_batch = torch.sum(ner_right_batch, dim=-1)\r\n\r\n for trip in re_right:\r\n subject = trip[0]\r\n object = trip[2]\r\n if ner_right_batch[subject[0], subject[1]] > 0 and ner_right_batch[object[0], object[1]] > 0:\r\n right_num += 1\r\n return [pred_num, gold_num, right_num]\r\n\r\n\r\n def count_ner_num(self, ner_pred, ner_label):\r\n 
ner_pred = torch.where(ner_pred>=0.5, torch.ones_like(ner_pred),\r\n torch.zeros_like(ner_pred))\r\n ner_pred_num = ner_pred.sum().item()\r\n ner_gold_num = ner_label.sum().item()\r\n\r\n ner_right = ner_pred * ner_label\r\n ner_right_num = ner_right.sum().item()\r\n return [ner_pred_num, ner_gold_num, ner_right_num]\r\n\r\n\r\nclass macro():\r\n def __init__(self, rel2idx, ner2idx):\r\n self.rel2idx = rel2idx\r\n self.ner2idx = ner2idx\r\n\r\n\r\n def get_ner_index(self, tensor):\r\n index = (tensor == 1).nonzero(as_tuple=False)\r\n index_scalar = []\r\n for index_tup in index:\r\n scalar = []\r\n for i in index_tup:\r\n scalar.append(i.item())\r\n index_scalar.append(tuple(scalar))\r\n return index_scalar\r\n\r\n def get_re_index(self, tensor):\r\n index = (tensor == 1).nonzero(as_tuple=False)\r\n index_list = []\r\n for index_tup in index:\r\n for i in index_tup:\r\n index_list.append(i.item())\r\n return index_list\r\n\r\n def get_trip(self, ner_pred, re_head_pred, re_tail_pred, relation):\r\n seq_len = ner_pred.size(0)\r\n\r\n\r\n re_head_pred = re_head_pred.view(seq_len * seq_len)\r\n re_tail_pred = re_tail_pred.view(seq_len * seq_len)\r\n\r\n ner_pred = torch.sum(ner_pred, dim=-1)\r\n ner_pred = torch.where(ner_pred > 0, torch.ones_like(ner_pred), torch.zeros_like(ner_pred))\r\n\r\n ner_pred_index = self.get_ner_index(ner_pred)\r\n ner_map = {} # head to [(head,tail1),(head,tail2)]\r\n for tup in ner_pred_index:\r\n if tup[0] not in ner_map:\r\n ner_map[tup[0]] = [tup]\r\n else:\r\n ner_map[tup[0]].append(tup)\r\n\r\n\r\n full_trip = []\r\n\r\n\r\n re_head_pred_index = self.get_re_index(re_head_pred)\r\n re_tail_pred_index = self.get_re_index(re_tail_pred)\r\n\r\n for i in range(seq_len*seq_len):\r\n if i in re_head_pred_index:\r\n subj_head = int(i // seq_len)\r\n obj_head = int(i % seq_len)\r\n if subj_head not in ner_map.keys() or obj_head not in ner_map.keys():\r\n continue\r\n\r\n subjects = ner_map[subj_head]\r\n objects = ner_map[obj_head]\r\n\r\n for s in subjects:\r\n for o in objects:\r\n posit = s[1] * seq_len + o[1]\r\n if posit in re_tail_pred_index:\r\n full_trip.append([s, relation, o])\r\n\r\n return full_trip\r\n\r\n\r\n def count_num(self, ner_pred, ner_label, re_pred_head, re_pred_tail, re_label_head, re_label_tail):\r\n ner_pred = torch.where(ner_pred>=0.5, torch.ones_like(ner_pred),\r\n torch.zeros_like(ner_pred))\r\n re_pred_head = torch.where(re_pred_head>=0.5, torch.ones_like(re_pred_head),\r\n torch.zeros_like(re_pred_head))\r\n re_pred_tail = torch.where(re_pred_tail>=0.5, torch.ones_like(re_pred_tail),\r\n torch.zeros_like(re_pred_tail))\r\n triple_num_list = []\r\n\r\n batch = ner_pred.size(2)\r\n for r in range(len(self.rel2idx)):\r\n pred_num, gold_num, right_num = 0, 0, 0\r\n for i in range(batch):\r\n ner_pred_batch = ner_pred[:, :, i, :]\r\n ner_label_batch = ner_label[:, :, i, :]\r\n\r\n re_label_head_batch = re_label_head[:,:,i,r]\r\n re_label_tail_batch = re_label_tail[:,:,i,r]\r\n re_label_set = self.get_trip(ner_label_batch, re_label_head_batch, re_label_tail_batch, r)\r\n\r\n re_pred_head_batch = re_pred_head[:,:,i,r]\r\n re_pred_tail_batch = re_pred_tail[:,:,i,r]\r\n re_pred_set = self.get_trip(ner_pred_batch, re_pred_head_batch, re_pred_tail_batch, r)\r\n\r\n\r\n pred_num += len(re_pred_set)\r\n gold_num += len(re_label_set)\r\n\r\n re_right = [trip for trip in re_pred_set if trip in re_label_set]\r\n\r\n ner_right_batch = ner_pred_batch * ner_label_batch\r\n ner_right_batch = torch.sum(ner_right_batch, dim=-1)\r\n\r\n for trip in 
re_right:\r\n subject = trip[0]\r\n object = trip[2]\r\n if ner_right_batch[subject[0], subject[1]] > 0 and ner_right_batch[object[0], object[1]] > 0:\r\n right_num += 1\r\n\r\n triple_num_list += [pred_num, gold_num, right_num]\r\n\r\n return triple_num_list\r\n\r\n\r\n def count_ner_num(self, ner_pred, ner_label):\r\n ner_pred = torch.where(ner_pred>=0.5, torch.ones_like(ner_pred),\r\n torch.zeros_like(ner_pred))\r\n entity_num_list = []\r\n for i in range(len(self.ner2idx)):\r\n ner_pred_single = ner_pred[:, :, :, i]\r\n ner_label_single = ner_label[:, :, :, i]\r\n\r\n ner_pred_num = ner_pred_single.sum().item()\r\n ner_gold_num = ner_label_single.sum().item()\r\n\r\n ner_right = ner_pred_single * ner_label_single\r\n ner_right_num = ner_right.sum().item()\r\n entity_num_list += [ner_pred_num, ner_gold_num, ner_right_num]\r\n\r\n return entity_num_list\r\n\r\n\r\ndef f1(num):\r\n results = {}\r\n results[\"p\"], results[\"r\"], results[\"f\"] = 0, 0, 0\r\n type_num = len(num)/3\r\n\r\n for i in range(0, len(num), 3):\r\n pred_num, gold_num, right_num = num[i], num[i+1], num[i+2]\r\n if pred_num == 0:\r\n p = 0\r\n else:\r\n p = float(right_num) / pred_num\r\n if gold_num == 0:\r\n r = 0\r\n else:\r\n r = float(right_num) / gold_num\r\n if p + r == 0:\r\n F1 = 0\r\n else:\r\n F1 = 2 * p * r / (p + r)\r\n\r\n\r\n results[\"p\"] += p\r\n results[\"r\"] += r\r\n results[\"f\"] += F1\r\n results[\"p\"] = results[\"p\"] / type_num\r\n results[\"r\"] = results[\"r\"] / type_num\r\n results[\"f\"] = results[\"f\"] / type_num\r\n\r\n return results\r\n\r\nclass loss(nn.Module):\r\n def __init__(self):\r\n super(loss, self).__init__()\r\n self.loss_ner = nn.BCELoss(reduction='sum')\r\n self.loss_re_head = nn.BCELoss(reduction='sum')\r\n self.loss_re_tail = nn.BCELoss(reduction='sum')\r\n\r\n\r\n def forward(self, ner_pred, ner_label, re_pred_head, re_pred_tail, re_label_head, re_label_tail):\r\n seq_len = ner_pred.size(1)\r\n ner_loss = self.loss_ner(ner_pred, ner_label) / seq_len\r\n re_head_loss = self.loss_re_head(re_pred_head, re_label_head) / seq_len\r\n re_tail_loss = self.loss_re_tail(re_pred_tail, re_label_tail) / seq_len\r\n loss = ner_loss + re_head_loss + re_tail_loss\r\n\r\n return loss\r\n\r\n\r\n\r\n\r\n" ]
[ [ "torch.zeros_like", "torch.sum", "torch.ones_like", "torch.nn.BCELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ayjabri/DeepRL
[ "0be095e3a3d04f60b4cdc97ed330dffc17b3024a", "0be095e3a3d04f60b4cdc97ed330dffc17b3024a" ]
[ "04_Reinforce/01_reinforce.py", "05_PolicyGradient/lib/utils.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n'''\nAuthor Ayman Al jabri\nReinforce Method:\nIs one of the simplist Policy_gradient methods. It uses the same formula:\n loss= - sum(Q(s,a) log(pi(s,a))) ----- Where Q(s,a): is the gradient scale. \n Q(s,a) = discounted rewards or sum(gamm**i * ri)\nsteps:\n 1.Initialize the network with random weights\n 2. Play N full episodes, saving their (𝑠,𝑎,𝑟,𝑠′) transitions\n 3. For every step, t, of every episode, k, calculate the discounted total reward for\n subsequent steps: 𝑄(𝑘,𝑡) = Σ 𝛾^𝑖 * 𝑟_𝑖\n 4. Calculate the loss function for all transitions: ℒ = −Σ𝑄(𝑘,𝑡) log 𝜋(𝑠,𝑎)\n 5. Perform an SGD update of weights, minimizing the loss (Use Adam instead - much faster)\n 6. Repeat from step 2 until converged\n\nUsually solve in 440 episodes within 0:00:09\n'''\nimport os\nimport gym\nimport ptan\nimport numpy as np\nimport argparse\n\n\nimport torch\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom lib import model, utils, hyperparameters\nfrom tensorboardX import SummaryWriter\n\[email protected]_grad()\ndef play(env, agent):\n state= env.reset()\n rewards = 0\n while True:\n env.render()\n state_v = torch.FloatTensor([state])\n action = agent(state_v)[0]\n state,r,done,_= env.step(action.item())\n rewards+=r\n if done:\n print(rewards)\n break\n env.close()\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser() \n parser.add_argument('--play', action='store_true', help='Play an episode after training is complete')\n parser.add_argument('--save',action='store_true', default=False, help='Store a copy of the network')\n parser.add_argument('--env', default='lander', help='Game name: cartpole, cartpole1, lander, freeway..etc')\n parser.add_argument('--episodes', type=int, help='train N episodes per batch')\n parser.add_argument('--cuda', default=True, action='store_true', help='Use GPU')\n args = parser.parse_args()\n \n params = hyperparameters.HYPERPARAMS[args.env]\n device = torch.device('cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu')\n if args.episodes: params.steps = args.episodes\n env = gym.make(params.env_id)\n net = model.RLNet(params.obs_size, params.act_size).to(device)\n print(net)\n agent = ptan.agent.PolicyAgent(net, apply_softmax=True, preprocessor=ptan.agent.float32_preprocessor,device=device)\n exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, params.gamma)\n generator = model.BatchGenerator(exp_source,params.steps,params)\n \n comment = f'_{args.env}_Reinforce_{params.steps}_episodes'\n current_time = datetime.now().strftime('%b%d_%H-%M-%S')\n logdir = os.path.join('runs', current_time + '_' + comment)\n writer = SummaryWriter(logdir=logdir)\n \n 'MAKE SURE TO USE ADAM OPTIMIZER; IT IS THE MOST STABLE FOR THIS METHOD'\n 'I tried using SGD but it took +500 epochs to solve while ADAM solves it in under 10 seconds and 43 epochs'\n optimizer = torch.optim.Adam(net.parameters(), lr = params.lr)\n \n loss_v = 0.0\n pt = time()\n st = datetime.now()\n with ptan.common.utils.RewardTracker(writer) as tracker:\n for batch in generator:\n for n in range(params.steps):\n reward = generator._total_rewards[n-params.steps]\n frame = generator._end_episode_frames[n-params.steps]\n tracker.reward(reward,frame)\n mean = np.mean(generator._total_rewards[-100:])\n if mean > params.bound_solve:\n print('Solved in {} episodes within {}'.format(generator.episodes, timedelta(seconds=(datetime.now()-st).seconds)))\n break\n optimizer.zero_grad()\n loss_v = utils.calc_reinforce_loss(batch, net, 
device)\n loss_v.backward()\n writer.add_scalar('loss', loss_v.item(),global_step=generator.frame,display_name=args.env)\n optimizer.step()\n \n \n if args.save:\n fname = f'reinforce_{args.env}_{str(st.date())}.dat'\n torch.save(net.state_dict(), fname)\n if args.play: play(env,agent)\n \n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 20 15:58:22 2020\n\n@author: ayman\n\"\"\"\nimport torch\nimport torch.nn.functional as F\n\n\ndef calc_pg_losses(batch, net, entropy_beta=0.02):\n \"\"\"\n Calculate Policy and Entropy losses from batch.\n\n Parameters\n ----------\n batch : BatchGenerator output\n The batch should contain States, Actions and Batch Scale.\n net : nn.Module\n Policy Gradient network.\n entropy_beta : int\n scalar to adjust entorpy value by when calculating entorpy loss.\n\n Returns\n -------\n policy_loss : Tensor\n DESCRIPTION.\n entropy_loss : Tensor\n DESCRIPTION.\n \"\"\"\n states_v = torch.FloatTensor(batch[0])\n actions = batch[1]\n batch_scale_v = torch.FloatTensor(batch[2])\n\n # policy loss\n logits_v = net(states_v)\n log_prob_v = F.log_softmax(logits_v, dim=1)\n # Gather probabilities with taken actions\n log_prob_action_v = batch_scale_v * \\\n log_prob_v[range(len(actions)), actions]\n policy_loss = - log_prob_action_v.mean()\n\n # entropy loss\n probs_v = F.softmax(logits_v, dim=1)\n entropy = - (probs_v * log_prob_v).sum(dim=1).mean()\n entropy_loss = - entropy_beta * entropy\n return policy_loss, entropy_loss\n" ]
[ [ "torch.FloatTensor", "torch.no_grad", "numpy.mean", "torch.cuda.is_available" ], [ "torch.nn.functional.softmax", "torch.FloatTensor", "torch.nn.functional.log_softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IraKorshunova/bruno
[ "e3050c0fc637659bb29cbe293633dffa145719db" ]
[ "config_rnn/test_few_shot_omniglot.py" ]
[ "import argparse\nimport importlib\nimport json\nimport os\nimport sys\nfrom collections import defaultdict\n\nimport numpy as np\nimport tensorflow as tf\n\nimport logger\nimport utils\nfrom config_rnn import defaults\n\nnp.set_printoptions(suppress=True)\n\n\ndef classify(config_name, seq_len, n_trials, batch_size):\n configs_dir = __file__.split('/')[-2]\n config = importlib.import_module('%s.%s' % (configs_dir, config_name))\n\n # metadata\n save_dir = utils.find_model_metadata('metadata/', args.config_name)\n expid = os.path.dirname(save_dir).split('/')[-1]\n\n assert seq_len == config.seq_len\n\n utils.autodir('logs')\n sys.stdout = logger.Logger(\n 'logs/%s_test_class_%s_%s_%s.log' % (expid, n_trials, config.seq_len, batch_size))\n sys.stderr = sys.stdout\n\n print('Building the model', expid)\n model = tf.make_template('model', config.build_model)\n\n data_iter = config.test_data_iter2\n data_iter.batch_size = batch_size\n\n x_in = tf.placeholder(tf.float32, shape=(data_iter.batch_size,) + config.obs_shape)\n log_probs = model(x_in)[0]\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n ckpt_file = save_dir + 'params.ckpt'\n print('restoring parameters from', ckpt_file)\n saver.restore(sess, tf.train.latest_checkpoint(save_dir))\n\n trial_accuracies = []\n\n for trial in range(n_trials):\n\n generator = data_iter.generate(trial=trial)\n\n n_correct = 0\n n_total = 0\n\n x_number2scores = defaultdict(list)\n x_number2true_y = {}\n x_number2ys = {}\n for iteration, (x_batch, y_batch, x_number) in enumerate(generator):\n y_true = int(y_batch[0, -1])\n\n log_p = sess.run(log_probs, feed_dict={x_in: x_batch})\n log_p = log_p.reshape((data_iter.batch_size, config.seq_len))[:, -1]\n\n x_number2scores[x_number].append(log_p)\n x_number2true_y[x_number] = y_true\n x_number2ys[x_number] = y_batch[:, 0]\n if (1. 
* iteration + 1) % 1000 == 0 or n_trials == 1:\n print(x_number + 1)\n\n # average scores\n for k, v in x_number2scores.items():\n y_true = x_number2true_y[k]\n avg_score = np.mean(np.asarray(v), axis=0)\n max_idx = np.argmax(avg_score)\n if x_number2ys[k][max_idx] == y_true:\n n_correct += 1\n n_total += 1\n\n acc = n_correct / n_total\n print(trial, 'accuracy', acc)\n print('n test examples', n_total)\n trial_accuracies.append(acc)\n print(trial_accuracies)\n\n print('---------------------------------------------')\n print(n_trials, config.seq_len)\n print(trial_accuracies)\n print('average accuracy over trials', np.mean(trial_accuracies))\n print('std accuracy over trials', np.std(trial_accuracies))\n\n\n# -----------------------------------------------------------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument('--config_name', type=str, required=True, help='name of the configuration')\nparser.add_argument('--seq_len', type=int, default=2,\n help='sequence length = number of shots + 1 (do not forget +1 for the test image)')\nparser.add_argument('--batch_size', type=int, default=20, help='batch_size = K-way')\nparser.add_argument('--n_trials', type=int, default=20, help='number of trials')\nparser.add_argument('--mask_dims', type=int, default=0, help='keep the dimensions with correlation > eps_corr')\nparser.add_argument('--eps_corr', type=float, default=0., help='minimum correlation')\nargs, _ = parser.parse_known_args()\ndefaults.set_parameters(args)\nprint('input args:\\n', json.dumps(vars(args), indent=4, separators=(',', ':')))\nif args.mask_dims == 0:\n assert args.eps_corr == 0.\n# -----------------------------------------------------------------------------\n\nclassify(config_name=args.config_name,\n seq_len=args.seq_len,\n n_trials=args.n_trials,\n batch_size=args.batch_size)\n" ]
[ [ "tensorflow.train.latest_checkpoint", "numpy.asarray", "numpy.set_printoptions", "tensorflow.placeholder", "numpy.std", "numpy.argmax", "numpy.mean", "tensorflow.Session", "tensorflow.make_template", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
kadvinj/efficientdet-tf2
[ "42252013a2902fadcddbf31de7fb62afc93c8154" ]
[ "video.py" ]
[ "#-------------------------------------#\r\n# 调用摄像头或者视频进行检测\r\n# 调用摄像头直接运行即可\r\n# 调用视频可以将cv2.VideoCapture()指定路径\r\n# 视频的保存并不难,可以百度一下看看\r\n#-------------------------------------#\r\nimport time\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\n\r\nfrom efficientdet import EfficientDet\r\n\r\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\r\nfor gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n\r\nefficientdet = EfficientDet()\r\n#-------------------------------------#\r\n# 调用摄像头\r\n# capture=cv2.VideoCapture(\"1.mp4\")\r\n#-------------------------------------#\r\ncapture=cv2.VideoCapture(0)\r\nfps = 0.0\r\nwhile(True):\r\n t1 = time.time()\r\n # 读取某一帧\r\n ref,frame=capture.read()\r\n # 格式转变,BGRtoRGB\r\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\r\n # 转变成Image\r\n frame = Image.fromarray(np.uint8(frame))\r\n\r\n # 进行检测\r\n frame = np.array(efficientdet.detect_image(frame))\r\n\r\n # RGBtoBGR满足opencv显示格式\r\n frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\r\n fps = ( fps + (1./(time.time()-t1)) ) / 2\r\n print(\"fps= %.2f\"%(fps))\r\n frame = cv2.putText(frame, \"fps= %.2f\"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n\r\n cv2.imshow(\"video\",frame)\r\n c= cv2.waitKey(1) & 0xff \r\n if c==27:\r\n capture.release()\r\n break\r\n \r\n" ]
[ [ "numpy.uint8", "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.set_memory_growth" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vladbragoi/indoor_localization_system
[ "b98d278cbd6a2ff8dcc093631eed0605e9e3a35f", "b98d278cbd6a2ff8dcc093631eed0605e9e3a35f", "b98d278cbd6a2ff8dcc093631eed0605e9e3a35f" ]
[ "localization_service/venv/lib/python3.5/site-packages/matplotlib/__init__.py", "localization_service/venv/lib/python3.5/site-packages/mpl_toolkits/mplot3d/art3d.py", "localization_service/venv/lib/python3.5/site-packages/matplotlib/tests/test_constrainedlayout.py" ]
[ "\"\"\"\nThis is an object-oriented plotting library.\n\nA procedural interface is provided by the companion pyplot module,\nwhich may be imported directly, e.g.::\n\n import matplotlib.pyplot as plt\n\nor using ipython::\n\n ipython\n\nat your terminal, followed by::\n\n In [1]: %matplotlib\n In [2]: import matplotlib.pyplot as plt\n\nat the ipython shell prompt.\n\nFor the most part, direct use of the object-oriented library is\nencouraged when programming; pyplot is primarily for working\ninteractively. The\nexceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,\n:func:`~matplotlib.pyplot.subplot`,\n:func:`~matplotlib.pyplot.subplots`, and\n:func:`~pyplot.savefig`, which can greatly simplify scripting.\n\nModules include:\n\n :mod:`matplotlib.axes`\n defines the :class:`~matplotlib.axes.Axes` class. Most pyplot\n commands are wrappers for :class:`~matplotlib.axes.Axes`\n methods. The axes module is the highest level of OO access to\n the library.\n\n :mod:`matplotlib.figure`\n defines the :class:`~matplotlib.figure.Figure` class.\n\n :mod:`matplotlib.artist`\n defines the :class:`~matplotlib.artist.Artist` base class for\n all classes that draw things.\n\n :mod:`matplotlib.lines`\n defines the :class:`~matplotlib.lines.Line2D` class for\n drawing lines and markers\n\n :mod:`matplotlib.patches`\n defines classes for drawing polygons\n\n :mod:`matplotlib.text`\n defines the :class:`~matplotlib.text.Text`,\n :class:`~matplotlib.text.TextWithDash`, and\n :class:`~matplotlib.text.Annotate` classes\n\n :mod:`matplotlib.image`\n defines the :class:`~matplotlib.image.AxesImage` and\n :class:`~matplotlib.image.FigureImage` classes\n\n :mod:`matplotlib.collections`\n classes for efficient drawing of groups of lines or polygons\n\n :mod:`matplotlib.colors`\n classes for interpreting color specifications and for making\n colormaps\n\n :mod:`matplotlib.cm`\n colormaps and the :class:`~matplotlib.image.ScalarMappable`\n mixin class for providing color mapping functionality to other\n classes\n\n :mod:`matplotlib.ticker`\n classes for calculating tick mark locations and for formatting\n tick labels\n\n :mod:`matplotlib.backends`\n a subpackage with modules for various gui libraries and output\n formats\n\nThe base matplotlib namespace includes:\n\n :data:`~matplotlib.rcParams`\n a global dictionary of default configuration settings. It is\n initialized by code which may be overridden by a matplotlibrc\n file.\n\n :func:`~matplotlib.rc`\n a function for setting groups of rcParams values\n\n :func:`~matplotlib.use`\n a function for setting the matplotlib backend. If used, this\n function must be called immediately after importing matplotlib\n for the first time. In particular, it must be called\n **before** importing pyplot (if pyplot is imported).\n\nmatplotlib was initially written by John D. 
Hunter (1968-2012) and is now\ndeveloped and maintained by a host of others.\n\nOccasionally the internal documentation (python docstrings) will refer\nto MATLAB&reg;, a registered trademark of The MathWorks, Inc.\n\n\"\"\"\n# NOTE: This file must remain Python 2 compatible for the foreseeable future,\n# to ensure that we error out properly for existing editable installs.\n\nimport sys\nif sys.version_info < (3, 5): # noqa: E402\n raise ImportError(\"\"\"\nMatplotlib 3.0+ does not support Python 2.x, 3.0, 3.1, 3.2, 3.3, or 3.4.\nBeginning with Matplotlib 3.0, Python 3.5 and above is required.\n\nSee Matplotlib `INSTALL.rst` file for more information:\n\n https://github.com/matplotlib/matplotlib/blob/master/INSTALL.rst\n\n\"\"\")\n\nimport atexit\nfrom collections.abc import MutableMapping\nimport contextlib\nimport distutils.version\nimport functools\nimport io\nimport importlib\nimport inspect\nfrom inspect import Parameter\nimport locale\nimport logging\nimport os\nfrom pathlib import Path\nimport pprint\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport tempfile\nimport urllib.request\nimport warnings\n\n# cbook must import matplotlib only within function\n# definitions, so it is safe to import from it here.\nfrom . import cbook, rcsetup\nfrom matplotlib.cbook import (\n MatplotlibDeprecationWarning, dedent, get_label, sanitize_sequence)\nfrom matplotlib.cbook import mplDeprecation # deprecated\nfrom matplotlib.rcsetup import defaultParams, validate_backend, cycler\n\nimport numpy\n\n# Get the version from the _version.py versioneer file. For a git checkout,\n# this is computed based on the number of commits since the last tag.\nfrom ._version import get_versions\n__version__ = str(get_versions()['version'])\ndel get_versions\n\n_log = logging.getLogger(__name__)\n\n__version__numpy__ = '1.10.0' # minimum required numpy version\n\n__bibtex__ = r\"\"\"@Article{Hunter:2007,\n Author = {Hunter, J. 
D.},\n Title = {Matplotlib: A 2D graphics environment},\n Journal = {Computing In Science \\& Engineering},\n Volume = {9},\n Number = {3},\n Pages = {90--95},\n abstract = {Matplotlib is a 2D graphics package used for Python\n for application development, interactive scripting, and\n publication-quality image generation across user\n interfaces and operating systems.},\n publisher = {IEEE COMPUTER SOC},\n year = 2007\n}\"\"\"\n\n\ndef compare_versions(a, b):\n \"return True if a is greater than or equal to b\"\n if isinstance(a, bytes):\n cbook.warn_deprecated(\n \"3.0\", \"compare_version arguments should be strs.\")\n a = a.decode('ascii')\n if isinstance(b, bytes):\n cbook.warn_deprecated(\n \"3.0\", \"compare_version arguments should be strs.\")\n b = b.decode('ascii')\n if a:\n a = distutils.version.LooseVersion(a)\n b = distutils.version.LooseVersion(b)\n return a >= b\n else:\n return False\n\n\ntry:\n import dateutil\nexcept ImportError:\n raise ImportError(\"Matplotlib requires dateutil\")\n\n\ntry:\n import pyparsing\nexcept ImportError:\n raise ImportError(\"Matplotlib requires pyparsing\")\nelse:\n if not compare_versions(pyparsing.__version__, '2.0.1'):\n raise ImportError(\n \"Matplotlib requires pyparsing>=2.0.1; you have %s\"\n % pyparsing.__version__)\n\n\nif not compare_versions(numpy.__version__, __version__numpy__):\n raise ImportError(\n \"Matplotlib requires numpy>=%s; you have %s\" % (\n __version__numpy__, numpy.__version__))\n\n\nif not hasattr(sys, 'argv'): # for modpython\n sys.argv = ['modpython']\n\n\n_verbose_msg = \"\"\"\\\nmatplotlib.verbose is deprecated;\nCommand line argument --verbose-LEVEL is deprecated.\nThis functionality is now provided by the standard\npython logging library. To get more (or less) logging output:\n import logging\n logger = logging.getLogger('matplotlib')\n logger.set_level(logging.INFO)\"\"\"\n\n\ndef _set_logger_verbose_level(level_str='silent', file_str='sys.stdout'):\n \"\"\"\n Use a --verbose-LEVEL level to set the logging level:\n\n \"\"\"\n levelmap = {'silent': logging.WARNING, 'helpful': logging.INFO,\n 'debug': logging.DEBUG, 'debug-annoying': logging.DEBUG,\n 'info': logging.INFO, 'warning': logging.WARNING}\n # Check that current state of logger isn't already more verbose\n # than the requested level. If it is more verbose, then leave more\n # verbose.\n newlev = levelmap[level_str]\n oldlev = _log.getEffectiveLevel()\n if newlev < oldlev:\n _log.setLevel(newlev)\n std = {\n 'sys.stdout': sys.stdout,\n 'sys.stderr': sys.stderr,\n }\n if file_str in std:\n fileo = std[file_str]\n else:\n fileo = sys.stdout\n try:\n fileo = open(file_str, 'w')\n # if this fails, we will just write to stdout\n except IOError:\n warnings.warn('could not open log file \"{0}\"'\n 'for writing. Check your '\n 'matplotlibrc'.format(file_str))\n console = logging.StreamHandler(fileo)\n console.setLevel(newlev)\n _log.addHandler(console)\n\n\ndef _parse_commandline():\n \"\"\"\n Check for --verbose-LEVEL type command line arguments and\n set logging level appropriately.\n \"\"\"\n\n levels = ('silent', 'helpful', 'debug', 'debug-annoying',\n 'info', 'warning')\n\n for arg in sys.argv[1:]:\n if arg.startswith('--verbose-'):\n level_str = arg[10:]\n # If it doesn't match one of ours, then don't even\n # bother noting it, we are just a 3rd-party library\n # to somebody else's script.\n if level_str in levels:\n _set_logger_verbose_level(level_str)\n\n_parse_commandline()\n\n\nclass Verbose(object):\n \"\"\"\n A class to handle reporting. 
Set the fileo attribute to any file\n instance to handle the output. Default is sys.stdout\n \"\"\"\n levels = ('silent', 'helpful', 'debug', 'debug-annoying')\n vald = {level: i for i, level in enumerate(levels)}\n\n # parse the verbosity from the command line; flags look like\n # --verbose-silent or --verbose-helpful\n _commandLineVerbose = None\n\n for arg in sys.argv[1:]:\n if not arg.startswith('--verbose-'):\n continue\n level_str = arg[10:]\n # If it doesn't match one of ours, then don't even\n # bother noting it, we are just a 3rd-party library\n # to somebody else's script.\n if level_str in levels:\n _commandLineVerbose = level_str\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def __init__(self):\n self.set_level('silent')\n self.fileo = sys.stdout\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def set_level(self, level):\n 'set the verbosity to one of the Verbose.levels strings'\n\n if self._commandLineVerbose is not None:\n level = self._commandLineVerbose\n if level not in self.levels:\n warnings.warn('matplotlib: unrecognized --verbose-* string \"%s\".'\n ' Legal values are %s' % (level, self.levels))\n else:\n self.level = level\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def set_fileo(self, fname):\n std = {\n 'sys.stdout': sys.stdout,\n 'sys.stderr': sys.stderr,\n }\n if fname in std:\n self.fileo = std[fname]\n else:\n try:\n fileo = open(fname, 'w')\n except IOError:\n raise ValueError('Verbose object could not open log file \"{0}\"'\n ' for writing.\\nCheck your matplotlibrc '\n 'verbose.fileo setting'.format(fname))\n else:\n self.fileo = fileo\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def report(self, s, level='helpful'):\n \"\"\"\n print message s to self.fileo if self.level>=level. Return\n value indicates whether a message was issued\n\n \"\"\"\n if self.ge(level):\n print(s, file=self.fileo)\n return True\n return False\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def wrap(self, fmt, func, level='helpful', always=True):\n \"\"\"\n return a callable function that wraps func and reports it\n output through the verbose handler if current verbosity level\n is higher than level\n\n if always is True, the report will occur on every function\n call; otherwise only on the first time the function is called\n \"\"\"\n assert callable(func)\n\n def wrapper(*args, **kwargs):\n ret = func(*args, **kwargs)\n\n if (always or not wrapper._spoke):\n spoke = self.report(fmt % ret, level)\n if not wrapper._spoke:\n wrapper._spoke = spoke\n return ret\n wrapper._spoke = False\n wrapper.__doc__ = func.__doc__\n return wrapper\n\n @cbook.deprecated(\"2.2\", message=_verbose_msg)\n def ge(self, level):\n 'return true if self.level is >= level'\n return self.vald[self.level] >= self.vald[level]\n\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n verbose = Verbose()\n\n\ndef _logged_cached(fmt, func=None):\n \"\"\"\n Decorator that logs a function's return value, and memoizes that value.\n\n After ::\n\n @_logged_cached(fmt)\n def func(): ...\n\n the first call to *func* will log its return value at the DEBUG level using\n %-format string *fmt*, and memoize it; later calls to *func* will directly\n return that value.\n \"\"\"\n if func is None: # Return the actual decorator.\n return functools.partial(_logged_cached, fmt)\n\n called = False\n ret = None\n\n @functools.wraps(func)\n def wrapper():\n nonlocal called, ret\n if not called:\n ret = func()\n called = True\n _log.debug(fmt, ret)\n return ret\n\n return 
wrapper\n\n\ndef checkdep_dvipng():\n try:\n s = subprocess.Popen(['dvipng', '-version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = s.communicate()\n line = stdout.decode('ascii').split('\\n')[1]\n v = line.split()[-1]\n return v\n except (IndexError, ValueError, OSError):\n return None\n\n\ndef checkdep_ghostscript():\n if checkdep_ghostscript.executable is None:\n if sys.platform == 'win32':\n # mgs is the name in miktex\n gs_execs = ['gswin32c', 'gswin64c', 'mgs', 'gs']\n else:\n gs_execs = ['gs']\n for gs_exec in gs_execs:\n try:\n s = subprocess.Popen(\n [gs_exec, '--version'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = s.communicate()\n if s.returncode == 0:\n v = stdout[:-1].decode('ascii')\n checkdep_ghostscript.executable = gs_exec\n checkdep_ghostscript.version = v\n except (IndexError, ValueError, OSError):\n pass\n return checkdep_ghostscript.executable, checkdep_ghostscript.version\ncheckdep_ghostscript.executable = None\ncheckdep_ghostscript.version = None\n\n\ndef checkdep_pdftops():\n try:\n s = subprocess.Popen(['pdftops', '-v'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = s.communicate()\n lines = stderr.decode('ascii').split('\\n')\n for line in lines:\n if 'version' in line:\n v = line.split()[-1]\n return v\n except (IndexError, ValueError, UnboundLocalError, OSError):\n return None\n\n\ndef checkdep_inkscape():\n if checkdep_inkscape.version is None:\n try:\n s = subprocess.Popen(['inkscape', '-V'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = s.communicate()\n lines = stdout.decode('ascii').split('\\n')\n for line in lines:\n if 'Inkscape' in line:\n v = line.split()[1]\n break\n checkdep_inkscape.version = v\n except (IndexError, ValueError, UnboundLocalError, OSError):\n pass\n return checkdep_inkscape.version\ncheckdep_inkscape.version = None\n\n\ndef checkdep_ps_distiller(s):\n if not s:\n return False\n\n flag = True\n gs_req = '8.60'\n gs_exec, gs_v = checkdep_ghostscript()\n if not compare_versions(gs_v, gs_req):\n flag = False\n warnings.warn(('matplotlibrc ps.usedistiller option can not be used '\n 'unless ghostscript-%s or later is installed on your '\n 'system') % gs_req)\n\n if s == 'xpdf':\n pdftops_req = '3.0'\n pdftops_req_alt = '0.9' # poppler version numbers, ugh\n pdftops_v = checkdep_pdftops()\n if compare_versions(pdftops_v, pdftops_req):\n pass\n elif (compare_versions(pdftops_v, pdftops_req_alt) and not\n compare_versions(pdftops_v, '1.0')):\n pass\n else:\n flag = False\n warnings.warn(('matplotlibrc ps.usedistiller can not be set to '\n 'xpdf unless xpdf-%s or later is installed on '\n 'your system') % pdftops_req)\n\n if flag:\n return s\n else:\n return False\n\n\ndef checkdep_usetex(s):\n if not s:\n return False\n\n gs_req = '8.60'\n dvipng_req = '1.6'\n flag = True\n\n if shutil.which(\"tex\") is None:\n flag = False\n warnings.warn('matplotlibrc text.usetex option can not be used unless '\n 'TeX is installed on your system')\n\n dvipng_v = checkdep_dvipng()\n if not compare_versions(dvipng_v, dvipng_req):\n flag = False\n warnings.warn('matplotlibrc text.usetex can not be used with *Agg '\n 'backend unless dvipng-%s or later is installed on '\n 'your system' % dvipng_req)\n\n gs_exec, gs_v = checkdep_ghostscript()\n if not compare_versions(gs_v, gs_req):\n flag = False\n warnings.warn('matplotlibrc text.usetex can not be used unless '\n 'ghostscript-%s or later is installed on your system'\n % gs_req)\n\n return 
flag\n\n\n@_logged_cached('$HOME=%s')\ndef get_home():\n \"\"\"\n Return the user's home directory.\n\n If the user's home directory cannot be found, return None.\n \"\"\"\n try:\n return str(Path.home())\n except Exception:\n return None\n\n\ndef _create_tmp_config_dir():\n \"\"\"\n If the config directory can not be created, create a temporary directory.\n \"\"\"\n configdir = os.environ['MPLCONFIGDIR'] = (\n tempfile.mkdtemp(prefix='matplotlib-'))\n atexit.register(shutil.rmtree, configdir)\n return configdir\n\n\ndef _get_xdg_config_dir():\n \"\"\"\n Returns the XDG configuration directory, according to the `XDG\n base directory spec\n <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.\n \"\"\"\n return (os.environ.get('XDG_CONFIG_HOME')\n or (str(Path(get_home(), \".config\"))\n if get_home()\n else None))\n\n\ndef _get_xdg_cache_dir():\n \"\"\"\n Returns the XDG cache directory, according to the `XDG\n base directory spec\n <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.\n \"\"\"\n return (os.environ.get('XDG_CACHE_HOME')\n or (str(Path(get_home(), \".cache\"))\n if get_home()\n else None))\n\n\ndef _get_config_or_cache_dir(xdg_base):\n configdir = os.environ.get('MPLCONFIGDIR')\n if configdir:\n configdir = Path(configdir).resolve()\n elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:\n configdir = Path(xdg_base, \"matplotlib\")\n elif get_home():\n configdir = Path(get_home(), \".matplotlib\")\n else:\n configdir = None\n\n if configdir:\n try:\n configdir.mkdir(parents=True, exist_ok=True)\n except OSError:\n pass\n else:\n if os.access(str(configdir), os.W_OK) and configdir.is_dir():\n return str(configdir)\n\n return _create_tmp_config_dir()\n\n\n@_logged_cached('CONFIGDIR=%s')\ndef get_configdir():\n \"\"\"\n Return the string representing the configuration directory.\n\n The directory is chosen as follows:\n\n 1. If the MPLCONFIGDIR environment variable is supplied, choose that.\n 2a. On Linux, follow the XDG specification and look first in\n `$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.\n 2b. On other platforms, choose `$HOME/.matplotlib`.\n 3. If the chosen directory exists and is writable, use that as the\n configuration directory.\n 4. If possible, create a temporary directory, and use it as the\n configuration directory.\n 5. 
A writable directory could not be found or created; return None.\n \"\"\"\n return _get_config_or_cache_dir(_get_xdg_config_dir())\n\n\n@_logged_cached('CACHEDIR=%s')\ndef get_cachedir():\n \"\"\"\n Return the location of the cache directory.\n\n The procedure used to find the directory is the same as for\n _get_config_dir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.\n \"\"\"\n return _get_config_or_cache_dir(_get_xdg_cache_dir())\n\n\ndef _get_data_path():\n 'get the path to matplotlib data'\n\n if 'MATPLOTLIBDATA' in os.environ:\n path = os.environ['MATPLOTLIBDATA']\n if not os.path.isdir(path):\n raise RuntimeError('Path in environment MATPLOTLIBDATA not a '\n 'directory')\n return path\n\n def get_candidate_paths():\n yield Path(__file__).with_name('mpl-data')\n # setuptools' namespace_packages may highjack this init file\n # so need to try something known to be in Matplotlib, not basemap.\n import matplotlib.afm\n yield Path(matplotlib.afm.__file__).with_name('mpl-data')\n # py2exe zips pure python, so still need special check.\n if getattr(sys, 'frozen', None):\n yield Path(sys.executable).with_name('mpl-data')\n # Try again assuming we need to step up one more directory.\n yield Path(sys.executable).parent.with_name('mpl-data')\n # Try again assuming sys.path[0] is a dir not a exe.\n yield Path(sys.path[0]) / 'mpl-data'\n\n for path in get_candidate_paths():\n if path.is_dir():\n return str(path)\n\n raise RuntimeError('Could not find the matplotlib data files')\n\n\n@_logged_cached('matplotlib data path: %s')\ndef get_data_path():\n if defaultParams['datapath'][0] is None:\n defaultParams['datapath'][0] = _get_data_path()\n return defaultParams['datapath'][0]\n\n\ndef get_py2exe_datafiles():\n data_path = Path(get_data_path())\n d = {}\n for path in filter(Path.is_file, data_path.glob(\"**/*\")):\n (d.setdefault(str(path.parent.relative_to(data_path.parent)), [])\n .append(str(path)))\n return list(d.items())\n\n\ndef matplotlib_fname():\n \"\"\"\n Get the location of the config file.\n\n The file location is determined in the following order\n\n - `$PWD/matplotlibrc`\n\n - `$MATPLOTLIBRC` if it is a file (or a named pipe, which can be created\n e.g. 
by process substitution)\n\n - `$MATPLOTLIBRC/matplotlibrc`\n\n - `$MPLCONFIGDIR/matplotlibrc`\n\n - On Linux,\n\n - `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if\n $XDG_CONFIG_HOME is defined)\n\n - or `$HOME/.config/matplotlib/matplotlibrc` (if\n $XDG_CONFIG_HOME is not defined)\n\n - On other platforms,\n\n - `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.\n\n - Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a\n system-defined copy.\n \"\"\"\n\n def gen_candidates():\n yield os.path.join(os.getcwd(), 'matplotlibrc')\n try:\n matplotlibrc = os.environ['MATPLOTLIBRC']\n except KeyError:\n pass\n else:\n yield matplotlibrc\n yield os.path.join(matplotlibrc, 'matplotlibrc')\n yield os.path.join(get_configdir(), 'matplotlibrc')\n yield os.path.join(get_data_path(), 'matplotlibrc')\n\n for fname in gen_candidates():\n if os.path.exists(fname):\n st_mode = os.stat(fname).st_mode\n if stat.S_ISREG(st_mode) or stat.S_ISFIFO(st_mode):\n break\n # Return first candidate that is a file, or last candidate if none is\n # valid (in that case, a warning is raised at startup by `rc_params`).\n return fname\n\n\n# rcParams deprecated and automatically mapped to another key.\n# Values are tuples of (version, new_name, f_old2new, f_new2old).\n_deprecated_map = {}\n\n# rcParams deprecated; some can manually be mapped to another key.\n# Values are tuples of (version, new_name_or_None).\n_deprecated_ignore_map = {\n 'text.dvipnghack': ('2.1', None),\n 'nbagg.transparent': ('2.2', 'figure.facecolor'),\n 'plugins.directory': ('2.2', None),\n 'pgf.debug': ('3.0', None),\n}\n\n# rcParams deprecated; can use None to suppress warnings; remain actually\n# listed in the rcParams (not included in _all_deprecated).\n# Values are typles of (version,)\n_deprecated_remain_as_none = {\n 'axes.hold': ('2.1',),\n 'backend.qt4': ('2.2',),\n 'backend.qt5': ('2.2',),\n 'text.latex.unicode': ('3.0',),\n}\n\n\n_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map}\n\n\nclass RcParams(MutableMapping, dict):\n\n \"\"\"\n A dictionary object including validation\n\n validating functions are defined and associated with rc parameters in\n :mod:`matplotlib.rcsetup`\n \"\"\"\n\n validate = {key: converter\n for key, (default, converter) in defaultParams.items()\n if key not in _all_deprecated}\n\n @property\n @cbook.deprecated(\"3.0\")\n def msg_depr(self):\n return \"%s is deprecated and replaced with %s; please use the latter.\"\n\n @property\n @cbook.deprecated(\"3.0\")\n def msg_depr_ignore(self):\n return \"%s is deprecated and ignored. Use %s instead.\"\n\n @property\n @cbook.deprecated(\"3.0\")\n def msg_depr_set(self):\n return (\"%s is deprecated. Please remove it from your matplotlibrc \"\n \"and/or style files.\")\n\n @property\n @cbook.deprecated(\"3.0\")\n def msg_obsolete(self):\n return (\"%s is obsolete. Please remove it from your matplotlibrc \"\n \"and/or style files.\")\n\n @property\n @cbook.deprecated(\"3.0\")\n def msg_backend_obsolete(self):\n return (\"The {} rcParam was deprecated in version 2.2. 
In order to \"\n \"force the use of a specific Qt binding, either import that \"\n \"binding first, or set the QT_API environment variable.\")\n\n # validate values on the way in\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n def __setitem__(self, key, val):\n try:\n if key in _deprecated_map:\n version, alt_key, alt_val, inverse_alt = _deprecated_map[key]\n cbook.warn_deprecated(\n version, key, obj_type=\"rcparam\", alternative=alt_key)\n key = alt_key\n val = alt_val(val)\n elif key in _deprecated_remain_as_none and val is not None:\n version, = _deprecated_remain_as_none[key]\n addendum = ''\n if key.startswith('backend'):\n addendum = (\n \"In order to force the use of a specific Qt binding, \"\n \"either import that binding first, or set the QT_API \"\n \"environment variable.\")\n cbook.warn_deprecated(\n \"2.2\", name=key, obj_type=\"rcparam\", addendum=addendum)\n elif key in _deprecated_ignore_map:\n version, alt_key = _deprecated_ignore_map[key]\n cbook.warn_deprecated(\n version, name=key, obj_type=\"rcparam\", alternative=alt_key)\n return\n elif key == 'examples.directory':\n cbook.warn_deprecated(\n \"3.0\", \"{} is deprecated; in the future, examples will be \"\n \"found relative to the 'datapath' directory.\".format(key))\n elif key == 'backend':\n if val is rcsetup._auto_backend_sentinel:\n if 'backend' in self:\n return\n try:\n cval = self.validate[key](val)\n except ValueError as ve:\n raise ValueError(\"Key %s: %s\" % (key, str(ve)))\n dict.__setitem__(self, key, cval)\n except KeyError:\n raise KeyError(\n '%s is not a valid rc parameter. See rcParams.keys() for a '\n 'list of valid parameters.' % (key,))\n\n def __getitem__(self, key):\n if key in _deprecated_map:\n version, alt_key, alt_val, inverse_alt = _deprecated_map[key]\n cbook.warn_deprecated(\n version, key, obj_type=\"rcparam\", alternative=alt_key)\n return inverse_alt(dict.__getitem__(self, alt_key))\n\n elif key in _deprecated_ignore_map:\n version, alt_key = _deprecated_ignore_map[key]\n cbook.warn_deprecated(\n version, key, obj_type=\"rcparam\", alternative=alt_key)\n return dict.__getitem__(self, alt_key) if alt_key else None\n\n elif key == 'examples.directory':\n cbook.warn_deprecated(\n \"3.0\", \"{} is deprecated; in the future, examples will be \"\n \"found relative to the 'datapath' directory.\".format(key))\n\n elif key == \"backend\":\n val = dict.__getitem__(self, key)\n if val is rcsetup._auto_backend_sentinel:\n from matplotlib import pyplot as plt\n plt.switch_backend(rcsetup._auto_backend_sentinel)\n\n return dict.__getitem__(self, key)\n\n def __repr__(self):\n class_name = self.__class__.__name__\n indent = len(class_name) + 1\n repr_split = pprint.pformat(dict(self), indent=1,\n width=80 - indent).split('\\n')\n repr_indented = ('\\n' + ' ' * indent).join(repr_split)\n return '{}({})'.format(class_name, repr_indented)\n\n def __str__(self):\n return '\\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))\n\n def __iter__(self):\n \"\"\"Yield sorted list of keys.\"\"\"\n yield from sorted(dict.__iter__(self))\n\n def find_all(self, pattern):\n \"\"\"\n Return the subset of this RcParams dictionary whose keys match,\n using :func:`re.search`, the given ``pattern``.\n\n .. 
note::\n\n Changes to the returned dictionary are *not* propagated to\n the parent RcParams dictionary.\n\n \"\"\"\n pattern_re = re.compile(pattern)\n return RcParams((key, value)\n for key, value in self.items()\n if pattern_re.search(key))\n\n\ndef rc_params(fail_on_error=False):\n \"\"\"Return a :class:`matplotlib.RcParams` instance from the\n default matplotlib rc file.\n \"\"\"\n fname = matplotlib_fname()\n if not os.path.exists(fname):\n # this should never happen, default in mpl-data should always be found\n message = 'could not find rc file; returning defaults'\n ret = RcParams([(key, default) for key, (default, _) in\n defaultParams.items()\n if key not in _all_deprecated])\n warnings.warn(message)\n return ret\n\n return rc_params_from_file(fname, fail_on_error)\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http, ftp, or file URL path.\"\"\"\n return URL_REGEX.match(filename) is not None\n\n\[email protected]\ndef _open_file_or_url(fname):\n if is_url(fname):\n with urllib.request.urlopen(fname) as f:\n yield (line.decode('utf-8') for line in f)\n else:\n fname = os.path.expanduser(fname)\n encoding = locale.getpreferredencoding(do_setlocale=False)\n if encoding is None:\n encoding = \"utf-8\"\n with open(fname, encoding=encoding) as f:\n yield f\n\n\n_error_details_fmt = 'line #%d\\n\\t\"%s\"\\n\\tin file \"%s\"'\n\n\ndef _rc_params_in_file(fname, fail_on_error=False):\n \"\"\"Return :class:`matplotlib.RcParams` from the contents of the given file.\n\n Unlike `rc_params_from_file`, the configuration class only contains the\n parameters specified in the file (i.e. default values are not filled in).\n \"\"\"\n cnt = 0\n rc_temp = {}\n with _open_file_or_url(fname) as fd:\n try:\n for line in fd:\n cnt += 1\n strippedline = line.split('#', 1)[0].strip()\n if not strippedline:\n continue\n tup = strippedline.split(':', 1)\n if len(tup) != 2:\n error_details = _error_details_fmt % (cnt, line, fname)\n warnings.warn('Illegal %s' % error_details)\n continue\n key, val = tup\n key = key.strip()\n val = val.strip()\n if key in rc_temp:\n warnings.warn('Duplicate key in file \"%s\", line #%d' %\n (fname, cnt))\n rc_temp[key] = (val, line, cnt)\n except UnicodeDecodeError:\n warnings.warn(\n ('Cannot decode configuration file %s with '\n 'encoding %s, check LANG and LC_* variables')\n % (fname, locale.getpreferredencoding(do_setlocale=False) or\n 'utf-8 (default)'))\n raise\n\n config = RcParams()\n\n for key in ('verbose.level', 'verbose.fileo'):\n if key in rc_temp:\n val, line, cnt = rc_temp.pop(key)\n if fail_on_error:\n config[key] = val # try to convert to proper type or raise\n else:\n try:\n config[key] = val # try to convert to proper type or skip\n except Exception as msg:\n error_details = _error_details_fmt % (cnt, line, fname)\n warnings.warn('Bad val \"%s\" on %s\\n\\t%s' %\n (val, error_details, msg))\n\n for key, (val, line, cnt) in rc_temp.items():\n if key in defaultParams:\n if fail_on_error:\n config[key] = val # try to convert to proper type or raise\n else:\n try:\n config[key] = val # try to convert to proper type or skip\n except Exception as msg:\n error_details = _error_details_fmt % (cnt, line, fname)\n warnings.warn('Bad val \"%s\" on %s\\n\\t%s' %\n (val, error_details, msg))\n elif key in _deprecated_ignore_map:\n version, alt_key = _deprecated_ignore_map[key]\n cbook.warn_deprecated(\n version, key, alternative=alt_key,\n addendum=\"Please update your 
matplotlibrc.\")\n else:\n print(\"\"\"\nBad key \"%s\" on line %d in\n%s.\nYou probably need to get an updated matplotlibrc file from\nhttp://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template\nor from the matplotlib source distribution\"\"\" % (key, cnt, fname),\n file=sys.stderr)\n\n return config\n\n\ndef rc_params_from_file(fname, fail_on_error=False, use_default_template=True):\n \"\"\"Return :class:`matplotlib.RcParams` from the contents of the given file.\n\n Parameters\n ----------\n fname : str\n Name of file parsed for matplotlib settings.\n fail_on_error : bool\n If True, raise an error when the parser fails to convert a parameter.\n use_default_template : bool\n If True, initialize with default parameters before updating with those\n in the given file. If False, the configuration class only contains the\n parameters specified in the file. (Useful for updating dicts.)\n \"\"\"\n config_from_file = _rc_params_in_file(fname, fail_on_error)\n\n if not use_default_template:\n return config_from_file\n\n iter_params = defaultParams.items()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", MatplotlibDeprecationWarning)\n config = RcParams([(key, default) for key, (default, _) in iter_params\n if key not in _all_deprecated])\n config.update(config_from_file)\n\n if config['datapath'] is None:\n config['datapath'] = get_data_path()\n\n if \"\".join(config['text.latex.preamble']):\n _log.info(\"\"\"\n*****************************************************************\nYou have the following UNSUPPORTED LaTeX preamble customizations:\n%s\nPlease do not ask for support with these customizations active.\n*****************************************************************\n\"\"\", '\\n'.join(config['text.latex.preamble']))\n _log.debug('loaded rc file %s', fname)\n\n return config\n\n\n# this is the instance used by the matplotlib classes\nrcParams = rc_params()\n\n# Don't trigger deprecation warning when just fetching.\nif dict.__getitem__(rcParams, 'examples.directory'):\n # paths that are intended to be relative to matplotlib_fname()\n # are allowed for the examples.directory parameter.\n # However, we will need to fully qualify the path because\n # Sphinx requires absolute paths.\n if not os.path.isabs(rcParams['examples.directory']):\n _basedir, _fname = os.path.split(matplotlib_fname())\n # Sometimes matplotlib_fname() can return relative paths,\n # Also, using realpath() guarantees that Sphinx will use\n # the same path that matplotlib sees (in case of weird symlinks).\n _basedir = os.path.realpath(_basedir)\n _fullpath = os.path.join(_basedir, rcParams['examples.directory'])\n rcParams['examples.directory'] = _fullpath\n\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", MatplotlibDeprecationWarning)\n rcParamsOrig = RcParams(rcParams.copy())\n rcParamsDefault = RcParams([(key, default) for key, (default, converter) in\n defaultParams.items()\n if key not in _all_deprecated])\n\nrcParams['ps.usedistiller'] = checkdep_ps_distiller(\n rcParams['ps.usedistiller'])\n\nrcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])\n\nif rcParams['axes.formatter.use_locale']:\n locale.setlocale(locale.LC_ALL, '')\n\n\ndef rc(group, **kwargs):\n \"\"\"\n Set the current rc params. *group* is the grouping for the rc, e.g.,\n for ``lines.linewidth`` the group is ``lines``, for\n ``axes.facecolor``, the group is ``axes``, and so on. 
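The full ``rcParams`` key is always ``'<group>.<name>'``, as the\n    equivalent forms below illustrate. 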
Group may\n also be a list or tuple of group names, e.g., (*xtick*, *ytick*).\n *kwargs* is a dictionary attribute name/value pairs, e.g.,::\n\n rc('lines', linewidth=2, color='r')\n\n sets the current rc params and is equivalent to::\n\n rcParams['lines.linewidth'] = 2\n rcParams['lines.color'] = 'r'\n\n The following aliases are available to save typing for interactive\n users:\n\n ===== =================\n Alias Property\n ===== =================\n 'lw' 'linewidth'\n 'ls' 'linestyle'\n 'c' 'color'\n 'fc' 'facecolor'\n 'ec' 'edgecolor'\n 'mew' 'markeredgewidth'\n 'aa' 'antialiased'\n ===== =================\n\n Thus you could abbreviate the above rc command as::\n\n rc('lines', lw=2, c='r')\n\n\n Note you can use python's kwargs dictionary facility to store\n dictionaries of default parameters. e.g., you can customize the\n font rc as follows::\n\n font = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : 'larger'}\n\n rc('font', **font) # pass in the font dict as kwargs\n\n This enables you to easily switch between several configurations. Use\n ``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to\n restore the default rc params after changes.\n \"\"\"\n\n aliases = {\n 'lw': 'linewidth',\n 'ls': 'linestyle',\n 'c': 'color',\n 'fc': 'facecolor',\n 'ec': 'edgecolor',\n 'mew': 'markeredgewidth',\n 'aa': 'antialiased',\n }\n\n if isinstance(group, str):\n group = (group,)\n for g in group:\n for k, v in kwargs.items():\n name = aliases.get(k) or k\n key = '%s.%s' % (g, name)\n try:\n rcParams[key] = v\n except KeyError:\n raise KeyError(('Unrecognized key \"%s\" for group \"%s\" and '\n 'name \"%s\"') % (key, g, name))\n\n\ndef rcdefaults():\n \"\"\"\n Restore the rc params from Matplotlib's internal default style.\n\n Style-blacklisted rc params (defined in\n `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.\n\n See Also\n --------\n rc_file_defaults :\n Restore the rc params from the rc file originally loaded by Matplotlib.\n matplotlib.style.use :\n Use a specific style file. 
Call ``style.use('default')`` to restore\n the default style.\n \"\"\"\n # Deprecation warnings were already handled when creating rcParamsDefault,\n # no need to reemit them here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", mplDeprecation)\n from .style.core import STYLE_BLACKLIST\n rcParams.clear()\n rcParams.update({k: v for k, v in rcParamsDefault.items()\n if k not in STYLE_BLACKLIST})\n\n\ndef rc_file_defaults():\n \"\"\"\n Restore the rc params from the original rc file loaded by Matplotlib.\n\n Style-blacklisted rc params (defined in\n `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.\n \"\"\"\n # Deprecation warnings were already handled when creating rcParamsOrig, no\n # need to reemit them here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", mplDeprecation)\n from .style.core import STYLE_BLACKLIST\n rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig\n if k not in STYLE_BLACKLIST})\n\n\ndef rc_file(fname):\n \"\"\"\n Update rc params from file.\n\n Style-blacklisted rc params (defined in\n `matplotlib.style.core.STYLE_BLACKLIST`) are not updated.\n \"\"\"\n # Deprecation warnings were already handled in rc_params_from_file, no need\n # to reemit them here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", mplDeprecation)\n from .style.core import STYLE_BLACKLIST\n rc_from_file = rc_params_from_file(fname)\n rcParams.update({k: rc_from_file[k] for k in rc_from_file\n if k not in STYLE_BLACKLIST})\n\n\nclass rc_context:\n \"\"\"\n Return a context manager for managing rc settings.\n\n This allows one to do::\n\n with mpl.rc_context(fname='screen.rc'):\n plt.plot(x, a)\n with mpl.rc_context(fname='print.rc'):\n plt.plot(x, b)\n plt.plot(x, c)\n\n The 'a' vs 'x' and 'c' vs 'x' plots would have settings from\n 'screen.rc', while the 'b' vs 'x' plot would have settings from\n 'print.rc'.\n\n A dictionary can also be passed to the context manager::\n\n with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):\n plt.plot(x, a)\n\n The 'rc' dictionary takes precedence over the settings loaded from\n 'fname'. Passing a dictionary only is also valid. For example a\n common usage is::\n\n with mpl.rc_context(rc={'interactive': False}):\n fig, ax = plt.subplots()\n ax.plot(range(3), range(3))\n fig.savefig('A.png', format='png')\n plt.close(fig)\n \"\"\"\n # While it may seem natural to implement rc_context using\n # contextlib.contextmanager, that would entail always calling the finally:\n # clause of the contextmanager (which restores the original rcs) including\n # during garbage collection; as a result, something like `plt.xkcd();\n # gc.collect()` would result in the style being lost (as `xkcd()` is\n # implemented on top of rc_context, and nothing is holding onto context\n # manager except possibly circular references.\n\n def __init__(self, rc=None, fname=None):\n self._orig = rcParams.copy()\n try:\n if fname:\n rc_file(fname)\n if rc:\n rcParams.update(rc)\n except Exception:\n self.__fallback()\n raise\n\n def __fallback(self):\n # If anything goes wrong, revert to the original rcs.\n updated_backend = self._orig['backend']\n dict.update(rcParams, self._orig)\n # except for the backend. 
If the context block triggered resloving\n # the auto backend resolution keep that value around\n if self._orig['backend'] is rcsetup._auto_backend_sentinel:\n rcParams['backend'] = updated_backend\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.__fallback()\n\n\ndef use(arg, warn=True, force=False):\n \"\"\"\n Set the matplotlib backend to one of the known backends.\n\n To find out which backend is currently set, see\n :func:`matplotlib.get_backend`.\n\n\n Parameters\n ----------\n arg : str\n The backend to switch to. This can either be one of the\n 'standard' backend names or a string of the form\n ``module://my.module.name``. This value is case-insensitive.\n\n warn : bool, optional\n If True, warn if this is called after pyplot has been imported\n and a backend is set up.\n\n defaults to True\n\n force : bool, optional\n If True, attempt to switch the backend. This defaults to\n False.\n\n\n \"\"\"\n name = validate_backend(arg)\n\n # if setting back to the same thing, do nothing\n if (dict.__getitem__(rcParams, 'backend') == name):\n pass\n\n # Check if we have already imported pyplot and triggered\n # backend selection, do a bit more work\n elif 'matplotlib.pyplot' in sys.modules:\n # If we are here then the requested is different than the current.\n # If we are going to force the switch, never warn, else, if warn\n # is True, then direct users to `plt.switch_backend`\n if (not force) and warn:\n warnings.warn(\n (\"matplotlib.pyplot as already been imported, \"\n \"this call will have no effect.\"),\n stacklevel=2)\n\n # if we are going to force switching the backend, pull in\n # `switch_backend` from pyplot. This will only happen if\n # pyplot is already imported.\n if force:\n from matplotlib.pyplot import switch_backend\n switch_backend(name)\n # Finally if pyplot is not imported update both rcParams and\n # rcDefaults so restoring the defaults later with rcdefaults\n # won't change the backend. 
This is a bit of overkill as 'backend'\n # is already in style.core.STYLE_BLACKLIST, but better to be safe.\n else:\n rcParams['backend'] = rcParamsDefault['backend'] = name\n\n\nif os.environ.get('MPLBACKEND'):\n rcParams['backend'] = os.environ.get('MPLBACKEND')\n\n\ndef get_backend():\n \"\"\"Return the name of the current backend.\"\"\"\n return rcParams['backend']\n\n\ndef interactive(b):\n \"\"\"\n Set interactive mode to boolean b.\n\n If b is True, then draw after every plotting command, e.g., after xlabel\n \"\"\"\n rcParams['interactive'] = b\n\n\ndef is_interactive():\n 'Return true if plot mode is interactive'\n return rcParams['interactive']\n\n\ndef tk_window_focus():\n \"\"\"Return true if focus maintenance under TkAgg on win32 is on.\n This currently works only for python.exe and IPython.exe.\n Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on.\"\"\"\n if rcParams['backend'] != 'TkAgg':\n return False\n return rcParams['tk.window_focus']\n\n\ndefault_test_modules = [\n 'matplotlib.tests',\n 'matplotlib.sphinxext.tests',\n 'mpl_toolkits.tests',\n]\n\n\ndef _init_tests():\n # CPython's faulthandler since v3.6 handles exceptions on Windows\n # https://bugs.python.org/issue23848 but until v3.6.4 it was printing\n # non-fatal exceptions https://bugs.python.org/issue30557\n import platform\n if not (sys.platform == 'win32' and\n (3, 6) < sys.version_info < (3, 6, 4) and\n platform.python_implementation() == 'CPython'):\n import faulthandler\n faulthandler.enable()\n\n # The version of FreeType to install locally for running the\n # tests. This must match the value in `setupext.py`\n LOCAL_FREETYPE_VERSION = '2.6.1'\n\n from matplotlib import ft2font\n if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or\n ft2font.__freetype_build_type__ != 'local'):\n warnings.warn(\n \"Matplotlib is not built with the correct FreeType version to run \"\n \"tests. Set local_freetype=True in setup.cfg and rebuild. \"\n \"Expect many image comparison failures below. \"\n \"Expected freetype version {0}. \"\n \"Found freetype version {1}. 
\"\n \"Freetype build type is {2}local\".format(\n LOCAL_FREETYPE_VERSION,\n ft2font.__freetype_version__,\n \"\" if ft2font.__freetype_build_type__ == 'local' else \"not \"\n )\n )\n\n try:\n import pytest\n except ImportError:\n print(\"matplotlib.test requires pytest to run.\")\n raise\n\n\ndef test(verbosity=None, coverage=False, switch_backend_warn=True,\n recursionlimit=0, **kwargs):\n \"\"\"run the matplotlib test suite\"\"\"\n _init_tests()\n if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):\n raise ImportError(\"Matplotlib test data is not installed\")\n\n old_backend = get_backend()\n old_recursionlimit = sys.getrecursionlimit()\n try:\n use('agg')\n if recursionlimit:\n sys.setrecursionlimit(recursionlimit)\n import pytest\n\n args = kwargs.pop('argv', [])\n provide_default_modules = True\n use_pyargs = True\n for arg in args:\n if any(arg.startswith(module_path)\n for module_path in default_test_modules):\n provide_default_modules = False\n break\n if os.path.exists(arg):\n provide_default_modules = False\n use_pyargs = False\n break\n if use_pyargs:\n args += ['--pyargs']\n if provide_default_modules:\n args += default_test_modules\n\n if coverage:\n args += ['--cov']\n\n if verbosity:\n args += ['-' + 'v' * verbosity]\n\n retcode = pytest.main(args, **kwargs)\n finally:\n if old_backend.lower() != 'agg':\n use(old_backend, warn=switch_backend_warn)\n if recursionlimit:\n sys.setrecursionlimit(old_recursionlimit)\n\n return retcode\n\n\ntest.__test__ = False # pytest: this function is not a test\n\n\ndef _replacer(data, key):\n \"\"\"Either returns data[key] or passes data back. Also\n converts input data to a sequence as needed.\n \"\"\"\n # if key isn't a string don't bother\n if not isinstance(key, str):\n return key\n # try to use __getitem__\n try:\n return sanitize_sequence(data[key])\n # key does not exist, silently fall back to key\n except KeyError:\n return key\n\n\n_DATA_DOC_APPENDIX = \"\"\"\n\n.. note::\n In addition to the above described arguments, this function can take a\n **data** keyword argument. If such a **data** argument is given, the\n following arguments are replaced by **data[<arg>]**:\n\n {replaced}\n\n Objects passed as **data** must support item access (``data[<arg>]``) and\n membership test (``<arg> in data``).\n\"\"\"\n\n\ndef _add_data_doc(docstring, replace_names, replace_all_args):\n \"\"\"Add documentation for a *data* field to the given docstring.\n\n Parameters\n ----------\n docstring : str\n The input docstring.\n replace_names : list of strings or None\n The list of parameter names which arguments should be replaced by\n `data[name]`. 
If None, all arguments are replaced if they are\n included in `data`.\n replace_all_args : bool\n If True, all arguments in *args get replaced, even if they are not\n in replace_names.\n\n Returns\n -------\n The augmented docstring.\n \"\"\"\n if docstring is None:\n docstring = ''\n else:\n docstring = dedent(docstring)\n _repl = \"\"\n if replace_names is None:\n _repl = \"* All positional and all keyword arguments.\"\n else:\n if len(replace_names) != 0:\n _repl = \"* All arguments with the following names: '{names}'.\"\n if replace_all_args:\n _repl += \"\\n * All positional arguments.\"\n _repl = _repl.format(names=\"', '\".join(sorted(replace_names)))\n return docstring + _DATA_DOC_APPENDIX.format(replaced=_repl)\n\n\ndef _preprocess_data(replace_names=None, replace_all_args=False,\n label_namer=None, positional_parameter_names=None):\n \"\"\"\n A decorator to add a 'data' kwarg to any a function. The signature\n of the input function must include the ax argument at the first position ::\n\n def foo(ax, *args, **kwargs)\n\n so this is suitable for use with Axes methods.\n\n Parameters\n ----------\n replace_names : list of strings, optional, default: None\n The list of parameter names which arguments should be replaced by\n `data[name]`. If None, all arguments are replaced if they are\n included in `data`.\n replace_all_args : bool, default: False\n If True, all arguments in *args get replaced, even if they are not\n in replace_names.\n label_namer : string, optional, default: None\n The name of the parameter which argument should be used as label, if\n label is not set. If None, the label keyword argument is not set.\n positional_parameter_names : list of strings or callable, optional\n The full list of positional parameter names (excluding an explicit\n `ax`/'self' argument at the first place and including all possible\n positional parameter in `*args`), in the right order. Can also include\n all other keyword parameter. Only needed if the wrapped function does\n contain `*args` and (replace_names is not None or replace_all_args is\n False). If it is a callable, it will be called with the actual\n tuple of *args and the data and should return a list like\n above.\n NOTE: callables should only be used when the names and order of *args\n can only be determined at runtime. Please use list of names\n when the order and names of *args is clear before runtime!\n\n .. 
note:: decorator also converts MappingView input data to list.\n \"\"\"\n if replace_names is not None:\n replace_names = set(replace_names)\n\n def param(func):\n sig = inspect.signature(func)\n _has_varargs = False\n _has_varkwargs = False\n _arg_names = []\n params = list(sig.parameters.values())\n for p in params:\n if p.kind is Parameter.VAR_POSITIONAL:\n _has_varargs = True\n elif p.kind is Parameter.VAR_KEYWORD:\n _has_varkwargs = True\n else:\n _arg_names.append(p.name)\n data_param = Parameter('data', Parameter.KEYWORD_ONLY, default=None)\n if _has_varkwargs:\n params.insert(-1, data_param)\n else:\n params.append(data_param)\n new_sig = sig.replace(parameters=params)\n # Import-time check: do we have enough information to replace *args?\n arg_names_at_runtime = False\n # there can't be any positional arguments behind *args and no\n # positional args can end up in **kwargs, so only *varargs make\n # problems.\n # http://stupidpythonideas.blogspot.de/2013/08/arguments-and-parameters.html\n if not _has_varargs:\n # all args are \"named\", so no problem\n # remove the first \"ax\" / self arg\n arg_names = _arg_names[1:]\n else:\n # Here we have \"unnamed\" variables and we need a way to determine\n # whether to replace a arg or not\n if replace_names is None:\n # all argnames should be replaced\n arg_names = None\n elif len(replace_names) == 0:\n # No argnames should be replaced\n arg_names = []\n elif len(_arg_names) > 1 and (positional_parameter_names is None):\n # we got no manual parameter names but more than an 'ax' ...\n if len(replace_names - set(_arg_names[1:])) == 0:\n # all to be replaced arguments are in the list\n arg_names = _arg_names[1:]\n else:\n raise AssertionError(\n \"Got unknown 'replace_names' and wrapped function \"\n \"{!r} uses '*args', need 'positional_parameter_names'\"\n .format(func.__name__))\n else:\n if positional_parameter_names is not None:\n if callable(positional_parameter_names):\n # determined by the function at runtime\n arg_names_at_runtime = True\n # so that we don't compute the label_pos at import time\n arg_names = []\n else:\n arg_names = positional_parameter_names\n else:\n if replace_all_args:\n arg_names = []\n else:\n raise AssertionError(\n \"Got 'replace_names' and wrapped function {!r} \"\n \"uses *args, need 'positional_parameter_names' or \"\n \"'replace_all_args'\".format(func.__name__))\n\n # compute the possible label_namer and label position in positional\n # arguments\n label_pos = 9999 # bigger than all \"possible\" argument lists\n label_namer_pos = 9999 # bigger than all \"possible\" argument lists\n if (label_namer and # we actually want a label here ...\n arg_names and # and we can determine a label in *args ...\n label_namer in arg_names): # and it is in *args\n label_namer_pos = arg_names.index(label_namer)\n if \"label\" in arg_names:\n label_pos = arg_names.index(\"label\")\n\n # Check the case we know a label_namer but we can't find it the\n # arg_names... 
Unfortunately the label_namer can be in **kwargs,\n # which we can't detect here and which results in a non-set label\n # which might surprise the user :-(\n if label_namer and not arg_names_at_runtime and not _has_varkwargs:\n if not arg_names:\n raise AssertionError(\n \"label_namer {!r} can't be found as the parameter without \"\n \"'positional_parameter_names'\".format(label_namer))\n elif label_namer not in arg_names:\n raise AssertionError(\n \"label_namer {!r} can't be found in the parameter names \"\n \"(known argnames: %s).\".format(label_namer, arg_names))\n else:\n # this is the case when the name is in arg_names\n pass\n\n @functools.wraps(func)\n def inner(ax, *args, data=None, **kwargs):\n # this is needed because we want to change these values if\n # arg_names_at_runtime==True, but python does not allow assigning\n # to a variable in a outer scope. So use some new local ones and\n # set them to the already computed values.\n _label_pos = label_pos\n _label_namer_pos = label_namer_pos\n _arg_names = arg_names\n\n label = None\n\n if data is None: # data validation\n args = tuple(sanitize_sequence(a) for a in args)\n else:\n if arg_names_at_runtime:\n # update the information about replace names and\n # label position\n _arg_names = positional_parameter_names(args, data)\n if (label_namer and # we actually want a label here ...\n _arg_names and # and we can find a label in *args\n (label_namer in _arg_names)): # and it is in *args\n _label_namer_pos = _arg_names.index(label_namer)\n if \"label\" in _arg_names:\n _label_pos = arg_names.index(\"label\")\n\n # save the current label_namer value so that it can be used as\n # a label\n if _label_namer_pos < len(args):\n label = args[_label_namer_pos]\n else:\n label = kwargs.get(label_namer, None)\n # ensure a string, as label can't be anything else\n if not isinstance(label, str):\n label = None\n\n if (replace_names is None) or (replace_all_args is True):\n # all should be replaced\n args = tuple(_replacer(data, a) for\n j, a in enumerate(args))\n else:\n # An arg is replaced if the arg_name of that position is\n # in replace_names ...\n if len(_arg_names) < len(args):\n raise RuntimeError(\n \"Got more args than function expects\")\n args = tuple(_replacer(data, a)\n if _arg_names[j] in replace_names else a\n for j, a in enumerate(args))\n\n if replace_names is None:\n # replace all kwargs ...\n kwargs = {k: _replacer(data, v) for k, v in kwargs.items()}\n else:\n # ... or only if a kwarg of that name is in replace_names\n kwargs = {\n k: _replacer(data, v) if k in replace_names else v\n for k, v in kwargs.items()}\n\n # replace the label if this func \"wants\" a label arg and the user\n # didn't set one. Note: if the user puts in \"label=None\", it does\n # *NOT* get replaced!\n user_supplied_label = (\n len(args) >= _label_pos or # label is included in args\n 'label' in kwargs # ... 
or in kwargs\n )\n if label_namer and not user_supplied_label:\n if _label_namer_pos < len(args):\n kwargs['label'] = get_label(args[_label_namer_pos], label)\n elif label_namer in kwargs:\n kwargs['label'] = get_label(kwargs[label_namer], label)\n else:\n warnings.warn(\n \"Tried to set a label via parameter %r in func %r but \"\n \"couldn't find such an argument.\\n\"\n \"(This is a programming error, please report to \"\n \"the Matplotlib list!)\" % (label_namer, func.__name__),\n RuntimeWarning, stacklevel=2)\n return func(ax, *args, **kwargs)\n\n inner.__doc__ = _add_data_doc(inner.__doc__,\n replace_names, replace_all_args)\n inner.__signature__ = new_sig\n return inner\n\n return param\n\n_log.debug('matplotlib version %s', __version__)\n_log.debug('interactive is %s', is_interactive())\n_log.debug('platform is %s', sys.platform)\n_log.debug('loaded modules: %s', list(sys.modules))\n", "# art3d.py, original mplot3d version by John Porter\n# Parts rewritten by Reinier Heeres <[email protected]>\n# Minor additions by Ben Axelrod <[email protected]>\n\n\"\"\"\nModule containing 3D artist code and functions to convert 2D\nartists into 3D versions which can be added to an Axes3D.\n\"\"\"\n\nimport math\n\nimport numpy as np\n\nfrom matplotlib import (\n artist, cbook, colors as mcolors, lines, text as mtext, path as mpath)\nfrom matplotlib.collections import (\n Collection, LineCollection, PolyCollection, PatchCollection,\n PathCollection)\nfrom matplotlib.colors import Normalize\nfrom matplotlib.patches import Patch\nfrom . import proj3d\n\n\ndef norm_angle(a):\n \"\"\"Return the given angle normalized to -180 < *a* <= 180 degrees.\"\"\"\n a = (a + 360) % 360\n if a > 180:\n a = a - 360\n return a\n\n\ndef norm_text_angle(a):\n \"\"\"Return the given angle normalized to -90 < *a* <= 90 degrees.\"\"\"\n a = (a + 180) % 180\n if a > 90:\n a = a - 180\n return a\n\n\ndef get_dir_vector(zdir):\n \"\"\"\n Return a direction vector.\n\n Parameters\n ----------\n zdir : {'x', 'y', 'z', None, 3-tuple}\n The direction. Possible values are:\n - 'x': equivalent to (1, 0, 0)\n - 'y': euqivalent to (0, 1, 0)\n - 'z': equivalent to (0, 0, 1)\n - *None*: euqivalent to (0, 0, 0)\n - an iterable (x, y, z) is returned unchanged.\n\n Returns\n -------\n x, y, z : array-like\n The direction vector. This is either a numpy.array or *zdir* itself if\n *zdir* is already a length-3 iterable.\n\n \"\"\"\n if zdir == 'x':\n return np.array((1, 0, 0))\n elif zdir == 'y':\n return np.array((0, 1, 0))\n elif zdir == 'z':\n return np.array((0, 0, 1))\n elif zdir is None:\n return np.array((0, 0, 0))\n elif cbook.iterable(zdir) and len(zdir) == 3:\n return zdir\n else:\n raise ValueError(\"'x', 'y', 'z', None or vector of length 3 expected\")\n\n\nclass Text3D(mtext.Text):\n \"\"\"\n Text object with 3D position and direction.\n\n Parameters\n ----------\n x, y, z\n The position of the text.\n text : str\n The text string to display.\n zdir : {'x', 'y', 'z', None, 3-tuple}\n The direction of the text. 
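For example, ``zdir='x'`` orients the text along the projected\n        x-axis direction (a hypothetical call: ``Text3D(0, 0, 0, 'origin',\n        zdir='x')``). 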
See `.get_dir_vector` for a description of\n the values.\n\n Other Parameters\n ----------------\n **kwargs\n All other parameters are passed on to `~matplotlib.text.Text`.\n \"\"\"\n\n def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):\n mtext.Text.__init__(self, x, y, text, **kwargs)\n self.set_3d_properties(z, zdir)\n\n def set_3d_properties(self, z=0, zdir='z'):\n x, y = self.get_position()\n self._position3d = np.array((x, y, z))\n self._dir_vec = get_dir_vector(zdir)\n self.stale = True\n\n @artist.allow_rasterization\n def draw(self, renderer):\n proj = proj3d.proj_trans_points(\n [self._position3d, self._position3d + self._dir_vec], renderer.M)\n dx = proj[0][1] - proj[0][0]\n dy = proj[1][1] - proj[1][0]\n if dx==0. and dy==0.:\n # atan2 raises ValueError: math domain error on 0,0\n angle = 0.\n else:\n angle = math.degrees(math.atan2(dy, dx))\n self.set_position((proj[0][0], proj[1][0]))\n self.set_rotation(norm_text_angle(angle))\n mtext.Text.draw(self, renderer)\n self.stale = False\n\n\ndef text_2d_to_3d(obj, z=0, zdir='z'):\n \"\"\"Convert a Text to a Text3D object.\"\"\"\n obj.__class__ = Text3D\n obj.set_3d_properties(z, zdir)\n\n\nclass Line3D(lines.Line2D):\n \"\"\"\n 3D line object.\n \"\"\"\n\n def __init__(self, xs, ys, zs, *args, **kwargs):\n \"\"\"\n Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.\n \"\"\"\n lines.Line2D.__init__(self, [], [], *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def set_3d_properties(self, zs=0, zdir='z'):\n xs = self.get_xdata()\n ys = self.get_ydata()\n\n try:\n # If *zs* is a list or array, then this will fail and\n # just proceed to juggle_axes().\n zs = float(zs)\n zs = [zs for x in xs]\n except TypeError:\n pass\n self._verts3d = juggle_axes(xs, ys, zs, zdir)\n self.stale = True\n\n @artist.allow_rasterization\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_data(xs, ys)\n lines.Line2D.draw(self, renderer)\n self.stale = False\n\n\ndef line_2d_to_3d(line, zs=0, zdir='z'):\n \"\"\"Convert a 2D line to 3D.\"\"\"\n\n line.__class__ = Line3D\n line.set_3d_properties(zs, zdir)\n\n\ndef path_to_3d_segment(path, zs=0, zdir='z'):\n \"\"\"Convert a path to a 3D segment.\"\"\"\n\n zs = np.broadcast_to(zs, len(path))\n pathsegs = path.iter_segments(simplify=False, curves=False)\n seg = [(x, y, z) for (((x, y), code), z) in zip(pathsegs, zs)]\n seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]\n return seg3d\n\n\ndef paths_to_3d_segments(paths, zs=0, zdir='z'):\n \"\"\"Convert paths from a collection object to 3D segments.\"\"\"\n\n zs = np.broadcast_to(zs, len(paths))\n segs = [path_to_3d_segment(path, pathz, zdir)\n for path, pathz in zip(paths, zs)]\n return segs\n\n\ndef path_to_3d_segment_with_codes(path, zs=0, zdir='z'):\n \"\"\"Convert a path to a 3D segment with path codes.\"\"\"\n\n zs = np.broadcast_to(zs, len(path))\n seg = []\n codes = []\n pathsegs = path.iter_segments(simplify=False, curves=False)\n for (((x, y), code), z) in zip(pathsegs, zs):\n seg.append((x, y, z))\n codes.append(code)\n seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]\n return seg3d, codes\n\n\ndef paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):\n \"\"\"\n Convert paths from a collection object to 3D segments with path codes.\n \"\"\"\n\n zs = np.broadcast_to(zs, len(paths))\n segments = []\n codes_list = []\n for path, pathz in zip(paths, zs):\n segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)\n 
segments.append(segs)\n codes_list.append(codes)\n return segments, codes_list\n\n\nclass Line3DCollection(LineCollection):\n \"\"\"\n A collection of 3D lines.\n \"\"\"\n\n def set_sort_zpos(self, val):\n \"\"\"Set the position to use for z-sorting.\"\"\"\n self._sort_zpos = val\n self.stale = True\n\n def set_segments(self, segments):\n \"\"\"\n Set 3D segments.\n \"\"\"\n self._segments3d = np.asanyarray(segments)\n LineCollection.set_segments(self, [])\n\n def do_3d_projection(self, renderer):\n \"\"\"\n Project the points according to renderer matrix.\n \"\"\"\n xyslist = [\n proj3d.proj_trans_points(points, renderer.M) for points in\n self._segments3d]\n segments_2d = [np.column_stack([xs, ys]) for xs, ys, zs in xyslist]\n LineCollection.set_segments(self, segments_2d)\n\n # FIXME\n minz = 1e9\n for xs, ys, zs in xyslist:\n minz = min(minz, min(zs))\n return minz\n\n @artist.allow_rasterization\n def draw(self, renderer, project=False):\n if project:\n self.do_3d_projection(renderer)\n LineCollection.draw(self, renderer)\n\n\ndef line_collection_2d_to_3d(col, zs=0, zdir='z'):\n \"\"\"Convert a LineCollection to a Line3DCollection object.\"\"\"\n segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)\n col.__class__ = Line3DCollection\n col.set_segments(segments3d)\n\n\nclass Patch3D(Patch):\n \"\"\"\n 3D patch object.\n \"\"\"\n\n def __init__(self, *args, zs=(), zdir='z', **kwargs):\n Patch.__init__(self, *args, **kwargs)\n self.set_3d_properties(zs, zdir)\n\n def set_3d_properties(self, verts, zs=0, zdir='z'):\n zs = np.broadcast_to(zs, len(verts))\n self._segment3d = [juggle_axes(x, y, z, zdir)\n for ((x, y), z) in zip(verts, zs)]\n self._facecolor3d = Patch.get_facecolor(self)\n\n def get_path(self):\n return self._path2d\n\n def get_facecolor(self):\n return self._facecolor2d\n\n def do_3d_projection(self, renderer):\n s = self._segment3d\n xs, ys, zs = zip(*s)\n vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)\n self._path2d = mpath.Path(np.column_stack([vxs, vys]))\n # FIXME: coloring\n self._facecolor2d = self._facecolor3d\n return min(vzs)\n\n\nclass PathPatch3D(Patch3D):\n \"\"\"\n 3D PathPatch object.\n \"\"\"\n\n def __init__(self, path, *, zs=(), zdir='z', **kwargs):\n Patch.__init__(self, **kwargs)\n self.set_3d_properties(path, zs, zdir)\n\n def set_3d_properties(self, path, zs=0, zdir='z'):\n Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)\n self._code3d = path.codes\n\n def do_3d_projection(self, renderer):\n s = self._segment3d\n xs, ys, zs = zip(*s)\n vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)\n self._path2d = mpath.Path(np.column_stack([vxs, vys]), self._code3d)\n # FIXME: coloring\n self._facecolor2d = self._facecolor3d\n return min(vzs)\n\n\ndef get_patch_verts(patch):\n \"\"\"Return a list of vertices for the path of a patch.\"\"\"\n trans = patch.get_patch_transform()\n path = patch.get_path()\n polygons = path.to_polygons(trans)\n if len(polygons):\n return polygons[0]\n else:\n return []\n\n\ndef patch_2d_to_3d(patch, z=0, zdir='z'):\n \"\"\"Convert a Patch to a Patch3D object.\"\"\"\n verts = get_patch_verts(patch)\n patch.__class__ = Patch3D\n patch.set_3d_properties(verts, z, zdir)\n\n\ndef pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):\n \"\"\"Convert a PathPatch to a PathPatch3D object.\"\"\"\n path = pathpatch.get_path()\n trans = pathpatch.get_patch_transform()\n\n mpath = trans.transform_path(path)\n pathpatch.__class__ = PathPatch3D\n pathpatch.set_3d_properties(mpath, z, 
zdir)\n\n\nclass Patch3DCollection(PatchCollection):\n \"\"\"\n A collection of 3D patches.\n \"\"\"\n\n def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):\n \"\"\"\n Create a collection of flat 3D patches with its normal vector\n pointed in *zdir* direction, and located at *zs* on the *zdir*\n axis. 'zs' can be a scalar or an array-like of the same length as\n the number of patches in the collection.\n\n Constructor arguments are the same as for\n :class:`~matplotlib.collections.PatchCollection`. In addition,\n keywords *zs=0* and *zdir='z'* are available.\n\n Also, the keyword argument \"depthshade\" is available to\n indicate whether or not to shade the patches in order to\n give the appearance of depth (default is *True*).\n This is typically desired in scatter plots.\n \"\"\"\n self._depthshade = depthshade\n super().__init__(*args, **kwargs)\n self.set_3d_properties(zs, zdir)\n\n def set_sort_zpos(self, val):\n \"\"\"Set the position to use for z-sorting.\"\"\"\n self._sort_zpos = val\n self.stale = True\n\n def set_3d_properties(self, zs, zdir):\n # Force the collection to initialize the face and edgecolors\n # just in case it is a scalarmappable with a colormap.\n self.update_scalarmappable()\n offsets = self.get_offsets()\n if len(offsets) > 0:\n xs, ys = offsets.T\n else:\n xs = []\n ys = []\n self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)\n self._facecolor3d = self.get_facecolor()\n self._edgecolor3d = self.get_edgecolor()\n self.stale = True\n\n def do_3d_projection(self, renderer):\n xs, ys, zs = self._offsets3d\n vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)\n\n fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else\n self._facecolor3d)\n fcs = mcolors.to_rgba_array(fcs, self._alpha)\n self.set_facecolors(fcs)\n\n ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else\n self._edgecolor3d)\n ecs = mcolors.to_rgba_array(ecs, self._alpha)\n self.set_edgecolors(ecs)\n PatchCollection.set_offsets(self, np.column_stack([vxs, vys]))\n\n if vzs.size > 0:\n return min(vzs)\n else:\n return np.nan\n\n\nclass Path3DCollection(PathCollection):\n \"\"\"\n A collection of 3D paths.\n \"\"\"\n\n def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):\n \"\"\"\n Create a collection of flat 3D paths with its normal vector\n pointed in *zdir* direction, and located at *zs* on the *zdir*\n axis. 'zs' can be a scalar or an array-like of the same length as\n the number of paths in the collection.\n\n Constructor arguments are the same as for\n :class:`~matplotlib.collections.PathCollection`. 
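Instances are usually created indirectly, for example by converting\n        an existing 2D collection with ``patch_collection_2d_to_3d`` from\n        this module. 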
In addition,\n keywords *zs=0* and *zdir='z'* are available.\n\n Also, the keyword argument \"depthshade\" is available to\n indicate whether or not to shade the patches in order to\n give the appearance of depth (default is *True*).\n This is typically desired in scatter plots.\n \"\"\"\n self._depthshade = depthshade\n super().__init__(*args, **kwargs)\n self.set_3d_properties(zs, zdir)\n\n def set_sort_zpos(self, val):\n \"\"\"Set the position to use for z-sorting.\"\"\"\n self._sort_zpos = val\n self.stale = True\n\n def set_3d_properties(self, zs, zdir):\n # Force the collection to initialize the face and edgecolors\n # just in case it is a scalarmappable with a colormap.\n self.update_scalarmappable()\n offsets = self.get_offsets()\n if len(offsets) > 0:\n xs, ys = offsets.T\n else:\n xs = []\n ys = []\n self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)\n self._facecolor3d = self.get_facecolor()\n self._edgecolor3d = self.get_edgecolor()\n self.stale = True\n\n def do_3d_projection(self, renderer):\n xs, ys, zs = self._offsets3d\n vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)\n\n fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else\n self._facecolor3d)\n fcs = mcolors.to_rgba_array(fcs, self._alpha)\n self.set_facecolors(fcs)\n\n ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else\n self._edgecolor3d)\n ecs = mcolors.to_rgba_array(ecs, self._alpha)\n self.set_edgecolors(ecs)\n PathCollection.set_offsets(self, np.column_stack([vxs, vys]))\n\n if vzs.size > 0 :\n return min(vzs)\n else :\n return np.nan\n\n\ndef patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):\n \"\"\"\n Convert a :class:`~matplotlib.collections.PatchCollection` into a\n :class:`Patch3DCollection` object\n (or a :class:`~matplotlib.collections.PathCollection` into a\n :class:`Path3DCollection` object).\n\n Parameters\n ----------\n za\n The location or locations to place the patches in the collection along\n the *zdir* axis. Default: 0.\n zdir\n The axis in which to place the patches. Default: \"z\".\n depthshade\n Whether to shade the patches to give a sense of depth. Default: *True*.\n\n \"\"\"\n if isinstance(col, PathCollection):\n col.__class__ = Path3DCollection\n elif isinstance(col, PatchCollection):\n col.__class__ = Patch3DCollection\n col._depthshade = depthshade\n col.set_3d_properties(zs, zdir)\n\n\nclass Poly3DCollection(PolyCollection):\n \"\"\"\n A collection of 3D polygons.\n \"\"\"\n\n def __init__(self, verts, *args, zsort=True, **kwargs):\n \"\"\"\n Create a Poly3DCollection.\n\n *verts* should contain 3D coordinates.\n\n Keyword arguments:\n zsort, see set_zsort for options.\n\n Note that this class does a bit of magic with the _facecolors\n and _edgecolors properties.\n \"\"\"\n super().__init__(verts, *args, **kwargs)\n self.set_zsort(zsort)\n self._codes3d = None\n\n _zsort_functions = {\n 'average': np.average,\n 'min': np.min,\n 'max': np.max,\n }\n\n def set_zsort(self, zsort):\n \"\"\"\n Sets the calculation method for the z-order.\n\n Parameters\n ----------\n zsort : bool or {'average', 'min', 'max'}\n For 'average', 'min', 'max' the z-order is determined by applying\n the function to the z-coordinates of the vertices in the viewer's\n coordinate system. 
*True* is equivalent to 'average'.\n \"\"\"\n\n if zsort is True:\n zsort = 'average'\n\n if zsort is not False:\n if zsort in self._zsort_functions:\n zsortfunc = self._zsort_functions[zsort]\n else:\n return False\n else:\n zsortfunc = None\n\n self._zsort = zsort\n self._sort_zpos = None\n self._zsortfunc = zsortfunc\n self.stale = True\n\n def get_vector(self, segments3d):\n \"\"\"Optimize points for projection.\"\"\"\n si = 0\n ei = 0\n segis = []\n points = []\n for p in segments3d:\n points.extend(p)\n ei = si + len(p)\n segis.append((si, ei))\n si = ei\n\n if len(segments3d):\n xs, ys, zs = zip(*points)\n else :\n # We need this so that we can skip the bad unpacking from zip()\n xs, ys, zs = [], [], []\n\n ones = np.ones(len(xs))\n self._vec = np.array([xs, ys, zs, ones])\n self._segis = segis\n\n def set_verts(self, verts, closed=True):\n \"\"\"Set 3D vertices.\"\"\"\n self.get_vector(verts)\n # 2D verts will be updated at draw time\n PolyCollection.set_verts(self, [], False)\n self._closed = closed\n\n def set_verts_and_codes(self, verts, codes):\n \"\"\"Sets 3D vertices with path codes.\"\"\"\n # set vertices with closed=False to prevent PolyCollection from\n # setting path codes\n self.set_verts(verts, closed=False)\n # and set our own codes instead.\n self._codes3d = codes\n\n def set_3d_properties(self):\n # Force the collection to initialize the face and edgecolors\n # just in case it is a scalarmappable with a colormap.\n self.update_scalarmappable()\n self._sort_zpos = None\n self.set_zsort(True)\n self._facecolors3d = PolyCollection.get_facecolor(self)\n self._edgecolors3d = PolyCollection.get_edgecolor(self)\n self._alpha3d = PolyCollection.get_alpha(self)\n self.stale = True\n\n def set_sort_zpos(self,val):\n \"\"\"Set the position to use for z-sorting.\"\"\"\n self._sort_zpos = val\n self.stale = True\n\n def do_3d_projection(self, renderer):\n \"\"\"\n Perform the 3D projection for this object.\n \"\"\"\n # FIXME: This may no longer be needed?\n if self._A is not None:\n self.update_scalarmappable()\n self._facecolors3d = self._facecolors\n\n txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)\n xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])\n for si, ei in self._segis]\n\n # This extra fuss is to re-order face / edge colors\n cface = self._facecolors3d\n cedge = self._edgecolors3d\n if len(cface) != len(xyzlist):\n cface = cface.repeat(len(xyzlist), axis=0)\n if len(cedge) != len(xyzlist):\n if len(cedge) == 0:\n cedge = cface\n else:\n cedge = cedge.repeat(len(xyzlist), axis=0)\n\n # if required sort by depth (furthest drawn first)\n if self._zsort:\n z_segments_2d = sorted(\n ((self._zsortfunc(zs), np.column_stack([xs, ys]), fc, ec, idx)\n for idx, ((xs, ys, zs), fc, ec)\n in enumerate(zip(xyzlist, cface, cedge))),\n key=lambda x: x[0], reverse=True)\n else:\n raise ValueError(\"whoops\")\n\n segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]\n if self._codes3d is not None:\n codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]\n PolyCollection.set_verts_and_codes(self, segments_2d, codes)\n else:\n PolyCollection.set_verts(self, segments_2d, self._closed)\n\n self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]\n if len(self._edgecolors3d) == len(cface):\n self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]\n else:\n self._edgecolors2d = self._edgecolors3d\n\n # Return zorder value\n if self._sort_zpos is not None:\n zvec = np.array([[0], [0], [self._sort_zpos], [1]])\n ztrans = 
proj3d.proj_transform_vec(zvec, renderer.M)\n return ztrans[2][0]\n elif tzs.size > 0 :\n # FIXME: Some results still don't look quite right.\n # In particular, examine contourf3d_demo2.py\n # with az = -54 and elev = -45.\n return np.min(tzs)\n else :\n return np.nan\n\n def set_facecolor(self, colors):\n PolyCollection.set_facecolor(self, colors)\n self._facecolors3d = PolyCollection.get_facecolor(self)\n\n def set_edgecolor(self, colors):\n PolyCollection.set_edgecolor(self, colors)\n self._edgecolors3d = PolyCollection.get_edgecolor(self)\n\n def set_alpha(self, alpha):\n \"\"\"\n Set the alpha transparencies of the collection.\n\n Parameters\n ----------\n alpha : float or None\n \"\"\"\n if alpha is not None:\n try:\n float(alpha)\n except TypeError:\n raise TypeError('alpha must be a float or None')\n artist.Artist.set_alpha(self, alpha)\n try:\n self._facecolors = mcolors.to_rgba_array(\n self._facecolors3d, self._alpha)\n except (AttributeError, TypeError, IndexError):\n pass\n try:\n self._edgecolors = mcolors.to_rgba_array(\n self._edgecolors3d, self._alpha)\n except (AttributeError, TypeError, IndexError):\n pass\n self.stale = True\n\n def get_facecolor(self):\n return self._facecolors2d\n\n def get_edgecolor(self):\n return self._edgecolors2d\n\n\ndef poly_collection_2d_to_3d(col, zs=0, zdir='z'):\n \"\"\"Convert a PolyCollection to a Poly3DCollection object.\"\"\"\n segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),\n zs, zdir)\n col.__class__ = Poly3DCollection\n col.set_verts_and_codes(segments_3d, codes)\n col.set_3d_properties()\n\n\ndef juggle_axes(xs, ys, zs, zdir):\n \"\"\"\n Reorder coordinates so that 2D xs, ys can be plotted in the plane\n orthogonal to zdir. zdir is normally x, y or z. However, if zdir\n starts with a '-' it is interpreted as a compensation for rotate_axes.\n \"\"\"\n if zdir == 'x':\n return zs, xs, ys\n elif zdir == 'y':\n return xs, zs, ys\n elif zdir[0] == '-':\n return rotate_axes(xs, ys, zs, zdir)\n else:\n return xs, ys, zs\n\n\ndef rotate_axes(xs, ys, zs, zdir):\n \"\"\"\n Reorder coordinates so that the axes are rotated with zdir along\n the original z axis. Prepending the axis with a '-' does the\n inverse transform, so zdir can be x, -x, y, -y, z or -z\n \"\"\"\n if zdir == 'x':\n return ys, zs, xs\n elif zdir == '-x':\n return zs, xs, ys\n\n elif zdir == 'y':\n return zs, xs, ys\n elif zdir == '-y':\n return ys, zs, xs\n\n else:\n return xs, ys, zs\n\n\ndef get_colors(c, num):\n \"\"\"Stretch the color argument to provide the required number *num*.\"\"\"\n return np.broadcast_to(\n mcolors.to_rgba_array(c) if len(c) else [0, 0, 0, 0],\n (num, 4))\n\n\ndef zalpha(colors, zs):\n \"\"\"Modify the alphas of the color list according to depth.\"\"\"\n # FIXME: This only works well if the points for *zs* are well-spaced\n # in all three dimensions. 
Otherwise, at certain orientations,\n # the min and max zs are very close together.\n # Should really normalize against the viewing depth.\n colors = get_colors(colors, len(zs))\n if len(zs):\n norm = Normalize(min(zs), max(zs))\n sats = 1 - norm(zs) * 0.7\n colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]\n return colors\n", "import numpy as np\nimport pytest\n\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import ticker, rcParams\n\n\ndef example_plot(ax, fontsize=12, nodec=False):\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n if not nodec:\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n else:\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n\n\ndef example_pcolor(ax, fontsize=12):\n dx, dy = 0.6, 0.6\n y, x = np.mgrid[slice(-3, 3 + dy, dy),\n slice(-3, 3 + dx, dx)]\n z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)\n pcm = ax.pcolormesh(x, y, z, cmap='RdBu_r', vmin=-1., vmax=1.,\n rasterized=True)\n # ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n return pcm\n\n\n@image_comparison(baseline_images=['constrained_layout1'],\n extensions=['png'])\ndef test_constrained_layout1():\n 'Test constrained_layout for a single subplot'\n fig = plt.figure(constrained_layout=True)\n ax = fig.add_subplot(111)\n example_plot(ax, fontsize=24)\n\n\n@image_comparison(baseline_images=['constrained_layout2'],\n extensions=['png'])\ndef test_constrained_layout2():\n 'Test constrained_layout for 2x2 subplots'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n example_plot(ax, fontsize=24)\n\n\n@image_comparison(baseline_images=['constrained_layout3'],\n extensions=['png'])\ndef test_constrained_layout3():\n 'Test constrained_layout for colorbars with subplots'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for nn, ax in enumerate(axs.flatten()):\n pcm = example_pcolor(ax, fontsize=24)\n if nn == 3:\n pad = 0.08\n else:\n pad = 0.02 # default\n fig.colorbar(pcm, ax=ax, pad=pad)\n\n\n@image_comparison(baseline_images=['constrained_layout4'])\ndef test_constrained_layout4():\n 'Test constrained_layout for a single colorbar with subplots'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n pcm = example_pcolor(ax, fontsize=24)\n fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)\n\n\n@image_comparison(baseline_images=['constrained_layout5'],\n tol=5.e-2, extensions=['png'])\ndef test_constrained_layout5():\n '''\n Test constrained_layout for a single colorbar with subplots,\n colorbar bottom\n '''\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n pcm = example_pcolor(ax, fontsize=24)\n fig.colorbar(pcm, ax=axs,\n use_gridspec=False, pad=0.01, shrink=0.6,\n location='bottom')\n\n\n@image_comparison(baseline_images=['constrained_layout6'],\n extensions=['png'])\ndef test_constrained_layout6():\n 'Test constrained_layout for nested gridspecs'\n fig = plt.figure(constrained_layout=True)\n gs = fig.add_gridspec(1, 2, figure=fig)\n gsl = gs[0].subgridspec(2, 2)\n gsr = gs[1].subgridspec(1, 2)\n axsl = []\n for gs in gsl:\n ax = fig.add_subplot(gs)\n axsl += [ax]\n example_plot(ax, fontsize=12)\n ax.set_xlabel('x-label\\nMultiLine')\n axsr = []\n for gs in gsr:\n ax = 
fig.add_subplot(gs)\n axsr += [ax]\n pcm = example_pcolor(ax, fontsize=12)\n\n fig.colorbar(pcm, ax=axsr,\n pad=0.01, shrink=0.99, location='bottom',\n ticks=ticker.MaxNLocator(nbins=5))\n\n\ndef test_constrained_layout7():\n 'Test for proper warning if fig not set in GridSpec'\n with pytest.warns(UserWarning, match='Calling figure.constrained_layout, '\n 'but figure not setup to do constrained layout'):\n fig = plt.figure(constrained_layout=True)\n gs = gridspec.GridSpec(1, 2)\n gsl = gridspec.GridSpecFromSubplotSpec(2, 2, gs[0])\n gsr = gridspec.GridSpecFromSubplotSpec(1, 2, gs[1])\n axsl = []\n for gs in gsl:\n ax = fig.add_subplot(gs)\n # need to trigger a draw to get warning\n fig.draw(fig.canvas.get_renderer())\n\n\n@image_comparison(baseline_images=['constrained_layout8'],\n extensions=['png'])\ndef test_constrained_layout8():\n 'Test for gridspecs that are not completely full'\n fig = plt.figure(figsize=(10, 5), constrained_layout=True)\n gs = gridspec.GridSpec(3, 5, figure=fig)\n axs = []\n for j in [0, 1]:\n if j == 0:\n ilist = [1]\n else:\n ilist = [0, 4]\n for i in ilist:\n ax = fig.add_subplot(gs[j, i])\n axs += [ax]\n pcm = example_pcolor(ax, fontsize=9)\n if i > 0:\n ax.set_ylabel('')\n if j < 1:\n ax.set_xlabel('')\n ax.set_title('')\n ax = fig.add_subplot(gs[2, :])\n axs += [ax]\n pcm = example_pcolor(ax, fontsize=9)\n\n fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)\n\n\n@image_comparison(baseline_images=['constrained_layout9'],\n extensions=['png'])\ndef test_constrained_layout9():\n 'Test for handling suptitle and for sharex and sharey'\n fig, axs = plt.subplots(2, 2, constrained_layout=True,\n sharex=False, sharey=False)\n for ax in axs.flatten():\n pcm = example_pcolor(ax, fontsize=24)\n ax.set_xlabel('')\n ax.set_ylabel('')\n ax.set_aspect(2.)\n fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)\n fig.suptitle('Test Suptitle', fontsize=28)\n\n\n@image_comparison(baseline_images=['constrained_layout10'],\n extensions=['png'])\ndef test_constrained_layout10():\n 'Test for handling legend outside axis'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n ax.plot(np.arange(12), label='This is a label')\n ax.legend(loc='center left', bbox_to_anchor=(0.8, 0.5))\n\n\n@image_comparison(baseline_images=['constrained_layout11'],\n extensions=['png'])\ndef test_constrained_layout11():\n 'Test for multiple nested gridspecs '\n fig = plt.figure(constrained_layout=True, figsize=(10, 3))\n gs0 = gridspec.GridSpec(1, 2, figure=fig)\n gsl = gridspec.GridSpecFromSubplotSpec(1, 2, gs0[0])\n gsl0 = gridspec.GridSpecFromSubplotSpec(2, 2, gsl[1])\n ax = fig.add_subplot(gs0[1])\n example_plot(ax, fontsize=9)\n axs = []\n for gs in gsl0:\n ax = fig.add_subplot(gs)\n axs += [ax]\n pcm = example_pcolor(ax, fontsize=9)\n fig.colorbar(pcm, ax=axs, shrink=0.6, aspect=70.)\n ax = fig.add_subplot(gsl[0])\n example_plot(ax, fontsize=9)\n\n\n@image_comparison(baseline_images=['constrained_layout11rat'],\n extensions=['png'])\ndef test_constrained_layout11rat():\n 'Test for multiple nested gridspecs with width_ratios'\n fig = plt.figure(constrained_layout=True, figsize=(10, 3))\n gs0 = gridspec.GridSpec(1, 2, figure=fig, width_ratios=[6., 1.])\n gsl = gridspec.GridSpecFromSubplotSpec(1, 2, gs0[0])\n gsl0 = gridspec.GridSpecFromSubplotSpec(2, 2, gsl[1],\n height_ratios=[2., 1.])\n ax = fig.add_subplot(gs0[1])\n example_plot(ax, fontsize=9)\n axs = []\n for gs in gsl0:\n ax = fig.add_subplot(gs)\n axs += [ax]\n pcm = example_pcolor(ax, fontsize=9)\n fig.colorbar(pcm, 
ax=axs, shrink=0.6, aspect=70.)\n ax = fig.add_subplot(gsl[0])\n example_plot(ax, fontsize=9)\n\n\n@image_comparison(baseline_images=['constrained_layout12'],\n extensions=['png'])\ndef test_constrained_layout12():\n 'Test that very unbalanced labeling still works.'\n fig = plt.figure(constrained_layout=True)\n\n gs0 = gridspec.GridSpec(6, 2, figure=fig)\n\n ax1 = fig.add_subplot(gs0[:3, 1])\n ax2 = fig.add_subplot(gs0[3:, 1])\n\n example_plot(ax1, fontsize=24)\n example_plot(ax2, fontsize=24)\n\n ax = fig.add_subplot(gs0[0:2, 0])\n example_plot(ax, nodec=True)\n ax = fig.add_subplot(gs0[2:4, 0])\n example_plot(ax, nodec=True)\n ax = fig.add_subplot(gs0[4:, 0])\n example_plot(ax, nodec=True)\n ax.set_xlabel('x-label')\n\n\n@image_comparison(baseline_images=['constrained_layout13'], tol=2.e-2,\n extensions=['png'])\ndef test_constrained_layout13():\n 'Test that padding works.'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n pcm = example_pcolor(ax, fontsize=12)\n fig.colorbar(pcm, ax=ax, shrink=0.6, aspect=20., pad=0.02)\n fig.set_constrained_layout_pads(w_pad=24./72., h_pad=24./72.)\n\n\n@image_comparison(baseline_images=['constrained_layout14'],\n extensions=['png'])\ndef test_constrained_layout14():\n 'Test that padding works.'\n fig, axs = plt.subplots(2, 2, constrained_layout=True)\n for ax in axs.flatten():\n pcm = example_pcolor(ax, fontsize=12)\n fig.colorbar(pcm, ax=ax, shrink=0.6, aspect=20., pad=0.02)\n fig.set_constrained_layout_pads(\n w_pad=3./72., h_pad=3./72.,\n hspace=0.2, wspace=0.2)\n\n\n@image_comparison(baseline_images=['constrained_layout15'],\n extensions=['png'])\ndef test_constrained_layout15():\n 'Test that rcparams work.'\n rcParams['figure.constrained_layout.use'] = True\n fig, axs = plt.subplots(2, 2)\n for ax in axs.flatten():\n example_plot(ax, fontsize=12)\n\n\n@image_comparison(baseline_images=['constrained_layout16'],\n extensions=['png'])\ndef test_constrained_layout16():\n 'Test ax.set_position.'\n fig, ax = plt.subplots(constrained_layout=True)\n example_plot(ax, fontsize=12)\n ax2 = fig.add_axes([0.2, 0.2, 0.4, 0.4])\n\n\n@image_comparison(baseline_images=['constrained_layout17'],\n extensions=['png'])\ndef test_constrained_layout17():\n 'Test uneven gridspecs'\n fig = plt.figure(constrained_layout=True)\n gs = gridspec.GridSpec(3, 3, figure=fig)\n\n ax1 = fig.add_subplot(gs[0, 0])\n ax2 = fig.add_subplot(gs[0, 1:])\n ax3 = fig.add_subplot(gs[1:, 0:2])\n ax4 = fig.add_subplot(gs[1:, -1])\n\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n\n\ndef test_constrained_layout18():\n 'Test twinx'\n fig, ax = plt.subplots(constrained_layout=True)\n ax2 = ax.twinx()\n example_plot(ax)\n example_plot(ax2, fontsize=24)\n fig.canvas.draw()\n assert all(ax.get_position().extents == ax2.get_position().extents)\n\n\ndef test_constrained_layout19():\n 'Test twiny'\n fig, ax = plt.subplots(constrained_layout=True)\n ax2 = ax.twiny()\n example_plot(ax)\n example_plot(ax2, fontsize=24)\n ax2.set_title('')\n ax.set_title('')\n fig.canvas.draw()\n assert all(ax.get_position().extents == ax2.get_position().extents)\n\n\ndef test_constrained_layout20():\n 'Smoke test cl does not mess up added axes'\n gx = np.linspace(-5, 5, 4)\n img = np.hypot(gx, gx[:, None])\n\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n mesh = ax.pcolormesh(gx, gx, img)\n fig.colorbar(mesh)\n\n\ndef test_constrained_layout21():\n '#11035: repeated calls to suptitle should not alter the layout'\n fig, ax = 
plt.subplots(constrained_layout=True)\n\n fig.suptitle(\"Suptitle0\")\n fig.canvas.draw()\n extents0 = np.copy(ax.get_position().extents)\n\n fig.suptitle(\"Suptitle1\")\n fig.canvas.draw()\n extents1 = np.copy(ax.get_position().extents)\n\n np.testing.assert_allclose(extents0, extents1)\n\n\ndef test_constrained_layout22():\n '#11035: suptitle should not be include in CL if manually positioned'\n fig, ax = plt.subplots(constrained_layout=True)\n\n fig.canvas.draw()\n extents0 = np.copy(ax.get_position().extents)\n\n fig.suptitle(\"Suptitle\", y=0.5)\n fig.canvas.draw()\n extents1 = np.copy(ax.get_position().extents)\n\n np.testing.assert_allclose(extents0, extents1)\n\n\ndef test_constrained_layout23():\n '''\n Comment in #11035: suptitle used to cause an exception when\n reusing a figure w/ CL with ``clear=True``.\n '''\n\n for i in range(2):\n fig, ax = plt.subplots(num=\"123\", constrained_layout=True, clear=True)\n fig.suptitle(\"Suptitle{}\".format(i))\n" ]
[ [ "matplotlib.pyplot.switch_backend", "matplotlib.cbook.get_label", "matplotlib.cbook.dedent", "matplotlib.rcsetup.validate_backend", "matplotlib.rcsetup.defaultParams.items", "matplotlib.cbook.sanitize_sequence" ], [ "matplotlib.text.Text.__init__", "matplotlib.lines.Line2D.__init__", "matplotlib.patches.Patch.get_facecolor", "matplotlib.collections.PolyCollection.get_alpha", "matplotlib.collections.PolyCollection.set_facecolor", "matplotlib.collections.PolyCollection.get_facecolor", "matplotlib.collections.PolyCollection.set_verts_and_codes", "matplotlib.patches.Patch.__init__", "matplotlib.cbook.iterable", "numpy.atleast_1d", "numpy.asanyarray", "numpy.column_stack", "matplotlib.artist.Artist.set_alpha", "numpy.min", "matplotlib.collections.PolyCollection.get_edgecolor", "matplotlib.collections.LineCollection.set_segments", "matplotlib.collections.PolyCollection.set_verts", "numpy.array", "matplotlib.collections.LineCollection.draw", "matplotlib.text.Text.draw", "matplotlib.lines.Line2D.draw", "matplotlib.collections.PolyCollection.set_edgecolor", "matplotlib.colors.to_rgba_array" ], [ "matplotlib.gridspec.GridSpecFromSubplotSpec", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.testing.decorators.image_comparison", "matplotlib.gridspec.GridSpec", "matplotlib.ticker.MaxNLocator", "numpy.testing.assert_allclose", "numpy.exp", "numpy.hypot", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cilinyan/image-processing
[ "7375bdf4af0ead415e5a957e6ac57d517de37d58", "7375bdf4af0ead415e5a957e6ac57d517de37d58", "7375bdf4af0ead415e5a957e6ac57d517de37d58", "7375bdf4af0ead415e5a957e6ac57d517de37d58" ]
[ "pytorch_object_detection/faster_rcnn_copy/predict.py", "pytorch_classification/ShuffleNet/model.py", "pytorch_classification/AlexNet/predict.py", "pytorch_classification/AlexNet/model.py" ]
[ "import os\nimport time\nimport json\n\nimport torch\nimport torchvision\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom torchvision import transforms\nfrom network_files import FasterRCNN, FastRCNNPredictor, AnchorsGenerator\nfrom backbone import resnet50_fpn_backbone, MobileNetV2\nfrom draw_box_utils import draw_box\n\n\ndef create_model(num_classes):\n # mobileNetv2+faster_RCNN\n # backbone = MobileNetV2().features\n # backbone.out_channels = 1280\n #\n # anchor_generator = AnchorsGenerator(sizes=((32, 64, 128, 256, 512),),\n # aspect_ratios=((0.5, 1.0, 2.0),))\n #\n # roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n # output_size=[7, 7],\n # sampling_ratio=2)\n #\n # model = FasterRCNN(backbone=backbone,\n # num_classes=num_classes,\n # rpn_anchor_generator=anchor_generator,\n # box_roi_pool=roi_pooler)\n\n # resNet50+fpn+faster_RCNN\n # 注意,这里的norm_layer要和训练脚本中保持一致\n backbone = resnet50_fpn_backbone(norm_layer=torch.nn.BatchNorm2d)\n model = FasterRCNN(backbone=backbone, num_classes=num_classes, rpn_score_thresh=0.5)\n\n return model\n\n\ndef time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()\n\n\ndef main():\n # get devices\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"using {} device.\".format(device))\n\n # create model\n model = create_model(num_classes=21)\n\n # load train weights\n train_weights = \"./save_weights/resNetFpn-model-15.pth\"\n assert os.path.exists(train_weights), \"{} file dose not exist.\".format(train_weights)\n model.load_state_dict(torch.load(train_weights, map_location=device)[\"model\"])\n model.to(device)\n\n # read class_indict\n label_json_path = './pascal_voc_classes.json'\n assert os.path.exists(label_json_path), \"json file {} dose not exist.\".format(label_json_path)\n json_file = open(label_json_path, 'r')\n class_dict = json.load(json_file)\n json_file.close()\n category_index = {v: k for k, v in class_dict.items()}\n\n # load image\n original_img = Image.open(\"../test.jpg\")\n\n # from pil image to tensor, do not normalize image\n data_transform = transforms.Compose([transforms.ToTensor()])\n img = data_transform(original_img)\n # expand batch dimension\n img = torch.unsqueeze(img, dim=0)\n\n model.eval() # 进入验证模式\n with torch.no_grad():\n # init\n img_height, img_width = img.shape[-2:]\n init_img = torch.zeros((1, 3, img_height, img_width), device=device)\n model(init_img)\n\n t_start = time_synchronized()\n predictions = model(img.to(device))[0]\n t_end = time_synchronized()\n print(\"inference+NMS time: {}\".format(t_end - t_start))\n\n predict_boxes = predictions[\"boxes\"].to(\"cpu\").numpy()\n predict_classes = predictions[\"labels\"].to(\"cpu\").numpy()\n predict_scores = predictions[\"scores\"].to(\"cpu\").numpy()\n\n if len(predict_boxes) == 0:\n print(\"没有检测到任何目标!\")\n\n draw_box(original_img,\n predict_boxes,\n predict_classes,\n predict_scores,\n category_index,\n thresh=0.5,\n line_thickness=3)\n plt.imshow(original_img)\n plt.show()\n # 保存预测的图片结果\n original_img.save(\"test_result.jpg\")\n\n\nif __name__ == '__main__':\n main()\n\n", "from typing import List, Callable\n\nimport torch\nfrom torch import Tensor\nimport torch.nn as nn\n\n\ndef channel_shuffle(x: Tensor, groups: int) -> Tensor:\n batch_size, num_channels, height, width = x.size()\n channels_per_group = num_channels // groups\n\n # reshape\n # [batch_size, num_channels, height, width] -> [batch_size, groups, channels_per_group, height, 
width]\n x = x.view(batch_size, groups, channels_per_group, height, width)\n\n x = torch.transpose(x, 1, 2).contiguous()\n\n # flatten\n x = x.view(batch_size, -1, height, width)\n\n return x\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, input_c: int, output_c: int, stride: int):\n super(InvertedResidual, self).__init__()\n\n if stride not in [1, 2]:\n raise ValueError(\"illegal stride value.\")\n self.stride = stride\n\n assert output_c % 2 == 0\n branch_features = output_c // 2\n # 当stride为1时,input_channel应该是branch_features的两倍\n # python中 '<<' 是位运算,可理解为计算×2的快速方法\n assert (self.stride != 1) or (input_c == branch_features << 1)\n\n if self.stride == 2:\n self.branch1 = nn.Sequential(\n self.depthwise_conv(input_c, input_c, kernel_s=3, stride=self.stride, padding=1),\n nn.BatchNorm2d(input_c),\n nn.Conv2d(input_c, branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.ReLU(inplace=True)\n )\n else:\n self.branch1 = nn.Sequential()\n\n self.branch2 = nn.Sequential(\n nn.Conv2d(input_c if self.stride > 1 else branch_features, branch_features, kernel_size=1,\n stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.ReLU(inplace=True),\n self.depthwise_conv(branch_features, branch_features, kernel_s=3, stride=self.stride, padding=1),\n nn.BatchNorm2d(branch_features),\n nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.ReLU(inplace=True)\n )\n\n @staticmethod\n def depthwise_conv(input_c: int,\n output_c: int,\n kernel_s: int,\n stride: int = 1,\n padding: int = 0,\n bias: bool = False) -> nn.Conv2d:\n return nn.Conv2d(in_channels=input_c, out_channels=output_c, kernel_size=kernel_s,\n stride=stride, padding=padding, bias=bias, groups=input_c)\n\n def forward(self, x: Tensor) -> Tensor:\n if self.stride == 1:\n x1, x2 = x.chunk(2, dim=1)\n out = torch.cat((x1, self.branch2(x2)), dim=1)\n else:\n out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)\n\n out = channel_shuffle(out, 2)\n\n return out\n\n\nclass ShuffleNetV2(nn.Module):\n def __init__(self,\n stages_repeats: List[int],\n stages_out_channels: List[int],\n num_classes: int = 1000,\n inverted_residual: Callable[..., nn.Module] = InvertedResidual):\n super(ShuffleNetV2, self).__init__()\n\n if len(stages_repeats) != 3:\n raise ValueError(\"expected stages_repeats as list of 3 positive ints\")\n if len(stages_out_channels) != 5:\n raise ValueError(\"expected stages_out_channels as list of 5 positive ints\")\n self._stage_out_channels = stages_out_channels\n\n # input RGB image\n input_channels = 3\n output_channels = self._stage_out_channels[0]\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(output_channels),\n nn.ReLU(inplace=True)\n )\n input_channels = output_channels\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n # Static annotations for mypy\n self.stage2: nn.Sequential\n self.stage3: nn.Sequential\n self.stage4: nn.Sequential\n\n stage_names = [\"stage{}\".format(i) for i in [2, 3, 4]]\n for name, repeats, output_channels in zip(stage_names, stages_repeats,\n self._stage_out_channels[1:]):\n seq = [inverted_residual(input_channels, output_channels, 2)]\n for i in range(repeats - 1):\n seq.append(inverted_residual(output_channels, output_channels, 1))\n setattr(self, name, nn.Sequential(*seq))\n input_channels = output_channels\n\n 
output_channels = self._stage_out_channels[-1]\n self.conv5 = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(output_channels),\n nn.ReLU(inplace=True)\n )\n\n self.fc = nn.Linear(output_channels, num_classes)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.maxpool(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n x = self.conv5(x)\n x = x.mean([2, 3]) # global pool\n x = self.fc(x)\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\ndef shufflenet_v2_x1_0(num_classes=1000):\n \"\"\"\n Constructs a ShuffleNetV2 with 1.0x output channels, as described in\n `\"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design\"\n <https://arxiv.org/abs/1807.11164>`.\n weight: https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth\n\n :param num_classes:\n :return:\n \"\"\"\n model = ShuffleNetV2(stages_repeats=[4, 8, 4],\n stages_out_channels=[24, 116, 232, 464, 1024],\n num_classes=num_classes)\n\n return model\n\n\ndef shufflenet_v2_x0_5(num_classes=1000):\n \"\"\"\n Constructs a ShuffleNetV2 with 0.5x output channels, as described in\n `\"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design\"\n <https://arxiv.org/abs/1807.11164>`.\n weight: https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth\n\n :param num_classes:\n :return:\n \"\"\"\n model = ShuffleNetV2(stages_repeats=[4, 8, 4],\n stages_out_channels=[24, 48, 96, 192, 1024],\n num_classes=num_classes)\n\n return model\n", "import os\nimport json\n\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\nfrom model import AlexNet\n\n\ndef main():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n data_transform = transforms.Compose(\n [transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n # load image\n img_path = \"../tulip.jpg\"\n assert os.path.exists(img_path), f\"file: '{img_path}' dose not exist.\"\n img = Image.open(img_path)\n\n plt.imshow(img)\n # [N, C, H, W]\n img = data_transform(img)\n # expand batch dimension\n img = torch.unsqueeze(img, dim=0)\n\n # read class_indict\n json_path = './class_indices.json'\n assert os.path.exists(json_path), f\"file: '{json_path}' dose not exist.\"\n\n json_file = open(json_path, \"r\")\n class_indict = json.load(json_file)\n\n # create model\n model = AlexNet(num_classes=5).to(device)\n\n # load model weights\n weights_path = \"./AlexNet.pth\"\n assert os.path.exists(weights_path), f\"file: '{weights_path}' dose not exist.\"\n model.load_state_dict(torch.load(weights_path))\n\n model.eval()\n with torch.no_grad():\n # predict class\n output = torch.squeeze(model(img.to(device))).cpu()\n predict = torch.softmax(output, dim=0)\n predict_cla = torch.argmax(predict).numpy()\n\n print_res = f\"class: {class_indict[str(predict_cla)]} prob: {predict[predict_cla].numpy():.3}\"\n plt.title(print_res)\n for i in range(len(predict)):\n print(f\"class: {class_indict[str(i)]:10} prob: {predict[i].numpy():.3}\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "import torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=1000, init_weights=False):\n super(AlexNet, self).__init__()\n self.features = 
nn.Sequential(OrderedDict([ # OrderedDIct input\n ('conv1', nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2)), # input(3,224,224) output(48,55,55)\n ('relu1', nn.ReLU(inplace=True)),\n ('pool1', nn.MaxPool2d(kernel_size=3, stride=2)), # output(48,27,27)\n ('conv2', nn.Conv2d(48, 128, kernel_size=5, padding=2)), # output(128,27,27)\n ('relu2', nn.ReLU(inplace=True)),\n ('pool2', nn.MaxPool2d(kernel_size=3, stride=2)), # output(128,13,13)\n ('conv3', nn.Conv2d(128, 192, kernel_size=3, padding=1)), # output(192,13,13)\n ('relu3', nn.ReLU(inplace=True)),\n ('conv4', nn.Conv2d(192, 192, kernel_size=3, padding=1)), # output(192,13,13)\n ('relu4', nn.ReLU(inplace=True)),\n ('conv5', nn.Conv2d(192, 128, kernel_size=3, padding=1)), # output(128,13,13)\n ('relu5', nn.ReLU(inplace=True)),\n ('pool3', nn.MaxPool2d(kernel_size=3, stride=2)), # output(128,6,6)\n ]))\n\n self.classifier = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(in_features=128 * 6 * 6, out_features=2048),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(2048, 2048),\n nn.ReLU(inplace=True),\n nn.Linear(2048, num_classes)\n )\n\n if init_weights:\n self.__initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = torch.flatten(x, start_dim=1)\n x = self.classifier(x)\n return x\n\n def __initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\nif __name__ == '__main__':\n input1 = torch.rand((11, 3, 224, 224))\n model = AlexNet(num_classes=5, init_weights=True)\n print(model)\n output1 = model(input1)\n print(output1.shape)\n" ]
[ [ "torch.cuda.synchronize", "matplotlib.pyplot.imshow", "torch.zeros", "torch.load", "torch.unsqueeze", "torch.no_grad", "torch.cuda.is_available", "matplotlib.pyplot.show" ], [ "torch.nn.Sequential", "torch.transpose", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "matplotlib.pyplot.imshow", "torch.softmax", "matplotlib.pyplot.title", "torch.load", "torch.unsqueeze", "torch.no_grad", "torch.cuda.is_available", "matplotlib.pyplot.show", "torch.argmax" ], [ "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.init.normal_", "torch.rand", "torch.flatten", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jseppanen/ray
[ "0c02427da25fe647c6e14d2390eae46aa5394f45" ]
[ "python/ray/util/sgd/torch/torch_runner.py" ]
[ "import logging\nimport io\nimport itertools\n\nimport ray\nimport torch\n\nfrom ray.util.sgd.torch.constants import USE_FP16, NUM_STEPS\nfrom ray.util.sgd import utils\n\nlogger = logging.getLogger(__name__)\namp = None\n\ntry:\n from apex import amp\nexcept ImportError:\n logger.debug(\"apex is not installed.\")\n pass\n\n\nclass TorchRunner:\n \"\"\"Manages a PyTorch model for training.\"\"\"\n\n def __init__(self,\n training_operator_cls,\n config=None,\n use_gpu=False,\n serialize_data_creation=True,\n use_fp16=False,\n use_tqdm=False,\n apex_args=None,\n scheduler_step_freq=None):\n self.training_operator_cls = training_operator_cls\n self.config = {} if config is None else config\n\n self.timers = utils.TimerCollection()\n self.epochs = 0\n self.training_operator = None\n self.serialize_data_creation = serialize_data_creation\n self.use_gpu = use_gpu\n self.use_fp16 = use_fp16\n self.use_tqdm = use_tqdm\n self.apex_args = apex_args or {}\n if use_fp16 and not amp:\n raise ImportError(\n \"Please install apex from \"\n \"https://www.github.com/nvidia/apex to use fp16 training.\")\n self.scheduler_step_freq = scheduler_step_freq\n\n # Training and Validation iterators\n self.train_iterator = None\n self._should_reset_train_loader = True\n\n self.val_iterator = None\n self._should_reset_val_loader = True\n\n def setup_operator(self):\n \"\"\"Create the training operator.\"\"\"\n self.training_operator = self.training_operator_cls(\n self.config,\n world_rank=0,\n local_rank=0,\n is_distributed=False,\n use_gpu=self.use_gpu,\n use_fp16=self.use_fp16,\n use_tqdm=self.use_tqdm,\n apex_args=self.apex_args,\n scheduler_step_freq=self.scheduler_step_freq)\n\n def get_iterator(self, training=True):\n if training:\n # In training.\n if self._should_reset_train_loader:\n self.epochs += 1\n self.train_iterator = iter(self.train_loader)\n self._should_reset_train_loader = False\n return self.train_iterator\n else:\n # In validation.\n if self._should_reset_val_loader:\n self.val_iterator = iter(self.validation_loader)\n self._should_reset_val_loader = False\n return self.val_iterator\n\n def make_iterator(self, training=True, num_steps=None):\n steps = 0\n # Needed to make sure we don't loop forever if iterator is empty\n has_at_least_one = False\n while True:\n iterator = self.get_iterator(training=training)\n if num_steps is not None and steps >= num_steps:\n # Stop iterating after reaching num_steps.\n break\n try:\n item = next(iterator)\n steps += 1\n if not has_at_least_one:\n has_at_least_one = True\n yield item\n except StopIteration:\n # Set should reset iterator on next cycle to True.\n if training:\n self._should_reset_train_loader = True\n else:\n self._should_reset_val_loader = True\n if num_steps is None or not has_at_least_one:\n # End after current epoch or if iterator has no elements.\n break\n else:\n # Else, start cycling through the iterator again.\n pass\n\n def train_epoch(self,\n num_steps=None,\n profile=False,\n info=None,\n iterator=None):\n \"\"\"Runs a training epoch and updates the model parameters.\"\"\"\n logger.debug(f\"Begin Training Step {self.epochs + 1}\")\n info = info or {}\n self._toggle_profiling(profile=profile)\n\n info.update({\n NUM_STEPS: num_steps,\n USE_FP16: self.use_fp16,\n \"epoch_idx\": self.epochs,\n })\n with self.timers.record(\"train_epoch\"):\n if iterator is not None:\n # Dataset will provide us with a list of tuples but we\n # need two lists.\n def format_batch(batch):\n features, targets = zip(*batch)\n return torch.cat(features), 
torch.cat(targets)\n\n iterator = map(format_batch, iterator)\n if num_steps:\n iterator = itertools.islice(iterator, num_steps)\n self.epochs += 1\n else:\n iterator = self.make_iterator(\n training=True, num_steps=num_steps)\n train_stats = self.training_operator.train_epoch(iterator, info)\n\n # This is so that `epochs` is first in ordering.\n stats = dict(epoch=self.epochs, **train_stats)\n if profile:\n stats.update(profile=self.timers.stats())\n return stats\n\n def validate(self, num_steps=None, profile=False, info=None):\n \"\"\"Evaluates the model on the validation data set.\"\"\"\n info = info or {}\n self._toggle_profiling(profile=profile)\n\n with self.timers.record(\"validation\"):\n iterator = self.make_iterator(training=False, num_steps=num_steps)\n validation_stats = self.training_operator.validate(\n iterator, info=info)\n if profile:\n validation_stats.update(profile=self.timers.stats())\n return validation_stats\n\n def _toggle_profiling(self, profile=False):\n \"\"\"Enables/Disables and resets timing profiles.\"\"\"\n if profile:\n self.timers.enable()\n self.timers.reset()\n else:\n self.timers.disable()\n self.training_operator._set_timers(self.timers)\n\n def state_dict(self):\n \"\"\"Returns the state of the runner.\"\"\"\n model_states = [model.state_dict() for model in self.models]\n optimizer_states = [\n optimizer.state_dict() for optimizer in self.optimizers\n ]\n state = {\n \"epoch\": self.epochs,\n \"operator\": self.training_operator.state_dict(),\n \"models\": model_states,\n \"optimizers\": optimizer_states\n }\n schedulers = self.schedulers\n if schedulers:\n state.update({\n \"schedulers\": [\n scheduler.state_dict() for scheduler in schedulers\n ]\n })\n # Check if fp16 is True and if NVIDIA Apex is imported.\n if self.use_fp16 and self.training_operator._amp:\n state.update({\"amp\": self.training_operator._amp.state_dict()})\n\n return state\n\n def load_state_dict(self, state):\n \"\"\"Sets the state of the model.\"\"\"\n models = self.models\n for model, state_dict in zip(models, state[\"models\"]):\n model.load_state_dict(state_dict)\n optimizers = self.optimizers\n for optimizer, state_dict in zip(optimizers, state[\"optimizers\"]):\n optimizer.load_state_dict(state_dict)\n schedulers = self.schedulers\n if schedulers:\n for scheduler, state_dict in zip(schedulers, state[\"schedulers\"]):\n scheduler.load_state_dict(state_dict)\n\n if self.use_fp16 and \"amp\" in state and self.training_operator._amp:\n self.training_operator._amp.load_state_dict(state[\"amp\"])\n self.epochs = state[\"epoch\"]\n self.training_operator.load_state_dict(state[\"operator\"])\n\n def state_stream(self):\n \"\"\"Returns a bytes object for the state dict.\"\"\"\n state_dict = self.state_dict()\n _buffer = io.BytesIO()\n torch.save(state_dict, _buffer)\n return _buffer.getvalue()\n\n def load_state_stream(self, byte_obj):\n \"\"\"Loads a bytes object the training state dict.\n\n This is needed because we don't want to deserialize the tensor\n onto the same device (which is from the driver process). 
We want to\n map it onto the actor's specific device.\n\n From: github.com/pytorch/pytorch/issues/10622#issuecomment-474733769\n \"\"\"\n _buffer = io.BytesIO(byte_obj)\n to_gpu = self.use_gpu and torch.cuda.is_available()\n state_dict = torch.load(\n _buffer,\n map_location=(\"cpu\" if not to_gpu else\n lambda storage, loc: storage.cuda()))\n return self.load_state_dict(state_dict)\n\n def apply(self, fn):\n return fn()\n\n def apply_operator(self, fn):\n return fn(self.training_operator)\n\n def shutdown(self):\n \"\"\"Attempts to shut down the worker.\"\"\"\n del self.train_iterator\n del self.val_iterator\n del self.training_operator\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n def get_models(self):\n \"\"\"Getter method. Needed for remote actor calls.\"\"\"\n return self.models\n\n def get_node_ip(self):\n return ray.services.get_node_ip_address()\n\n @property\n def models(self):\n return self.training_operator._get_original_models()\n\n @property\n def optimizers(self):\n return self.training_operator._get_optimizers()\n\n @property\n def schedulers(self):\n return self.training_operator._get_schedulers()\n\n @property\n def train_loader(self):\n return self.training_operator._get_train_loader()\n\n @property\n def validation_loader(self):\n return self.training_operator._get_validation_loader()\n\n @property\n def criterion(self):\n return self.training_operator._criterion\n\n @property\n def given_models(self):\n if len(self.models) > 1:\n return self.models\n else:\n return self.models[0]\n\n @property\n def given_optimizers(self):\n if len(self.optimizers) > 1:\n return self.optimizers\n else:\n return self.optimizers[0]\n\n @property\n def given_schedulers(self):\n if not self.schedulers:\n return self.schedulers\n if len(self.schedulers) > 1:\n return self.schedulers\n else:\n return self.schedulers[0]\n" ]
[ [ "torch.cat", "torch.cuda.empty_cache", "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mmiikeke/Lane_Detection
[ "52b5572da04752240c450bc373ba22d4e5676a27", "52b5572da04752240c450bc373ba22d4e5676a27" ]
[ "detection_program/demo_class.py", "detection_program/model.py" ]
[ "from pathlib import Path\nimport os, cv2, torch\nimport threading\nfrom detection_program.options import opt\nimport numpy as np\nfrom torch.autograd import Variable\nimport copy, time\nfrom utils.eval_utils import generate_result, eliminate_fewer_points, sort_along_y, eliminate_out, draw_points, sort_lane\nfrom PySide2.QtCore import Signal\nfrom PySide2 import QtCore\nimport queue\n\nclass Lane_Detection(QtCore.QObject):\n\n update_progressbar = Signal(float)\n detect_callback = Signal(list)\n update_output_imgs = Signal(str, int, int)\n\n def __init__(self, input_path, output_path, is_inputvideo, is_outputvideo, is_outputclips, widget):\n super().__init__()\n\n self.input_path = input_path\n self.output_clips_path = os.path.join(output_path, 'clips')\n self.output_video_path = os.path.join(output_path, 'video')\n self.is_inputvideo = is_inputvideo\n self.is_outputvideo = is_outputvideo\n self.is_outputclips = is_outputclips\n self.model = torch.load(\"detection_program/model/model.pth\", map_location='cuda:'+str(opt.cuda_devices))\n torch.save(self.model, \"detection_program/model/model2.pth\")\n self.clips = list()\n self.subpaths = list()\n self.fps = 30\n self.num_lanes = 0\n\n if not os.path.isdir(self.output_clips_path):\n os.makedirs(self.output_clips_path)\n \n if not os.path.isdir(self.output_video_path):\n os.makedirs(self.output_video_path)\n\n self.widget = widget\n \n def run(self):\n \n t = threading.currentThread()\n\n # Video to clips\n if self.is_inputvideo:\n vid = cv2.VideoCapture(self.input_path)\n length = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\n #self.update_progressbar.emit(60) #Succeed\n #self.widget.progressBar.setValue(60) #Failed\n i = 0\n q = queue.Queue(maxsize=5)\n frame_time = 0.0\n start_time = time.time()\n while(vid.isOpened() and getattr(t, \"do_run\", True)):\n self.update_progressbar.emit(i*100/length)\n\n # Detect\n ret, frame = vid.read()\n if ret == False:\n break\n self.detect(frame, str(i).zfill(5)+'.jpg', i)\n\n # Caculate time\n tt = time.time() - start_time\n frame_time += tt\n if i >= 5:\n frame_time = frame_time - q.get()\n fps = 5/frame_time\n else:\n fps = i/frame_time\n q.put(tt)\n\n self.widget.label_info.setText(f'Detect lane: \\t{str(i).zfill(5)}.jpg\\nExecution time of the frame: \\t{tt:.2f} second\\nFrame per second: \\t{fps:.2f}')\n start_time = time.time()\n i+=1\n \n vid.release()\n cv2.destroyAllWindows()\n \n # Read clips\n if not self.is_inputvideo:\n images = os.listdir(self.input_path)\n length = len(images)\n\n i = 0\n for num, path in enumerate(images):\n if not getattr(t, \"do_run\", True):\n break\n\n frame = cv2.imread(str(Path(self.input_path).joinpath(path)))\n self.widget.label_info.setText(f'Detect lane: \\t{path}\\n')\n self.update_progressbar.emit(i*100/length)\n self.detect(frame, path, num)\n i+=1\n \n # Generate video\n if self.is_outputvideo:\n self.gen_video()\n print(\"generate video complete\")\n \n self.detect_callback.emit(self.subpaths)\n\n def detect(self, image, name, num):\n image = cv2.resize(image, (512,256))\n image = np.rollaxis(image, axis=2, start=0)/255.0\n _, _, result_image = self.gen_test(np.array([image]))\n\n image_path = os.path.join(self.output_clips_path, name)\n cv2.imwrite(str(image_path), result_image[0])\n self.subpaths.append(name)\n\n self.update_output_imgs.emit(image_path, num, 0)\n \n if self.is_outputvideo:\n self.clips.append(result_image[0])\n\n def gen_test(self, test_images, thresh = 0.81):\n test_images = torch.from_numpy(test_images).float()\n test_images = 
Variable(test_images).cuda(opt.cuda_devices)\n result = self.model(test_images)\n confidences, offsets, instances = result[-1]\n \n num_batch = len(test_images)\n\n out_x = []\n out_y = []\n out_images = []\n\n test_images = test_images.cpu().numpy()\n\n for i in range(num_batch):\n # test on test data set\n image = copy.deepcopy(test_images[i])\n image = np.rollaxis(image, axis=2, start=0)\n image = np.rollaxis(image, axis=2, start=0)*255.0\n image = image.astype(np.uint8).copy()\n\n confidence = confidences[i].view(opt.grid_y, opt.grid_x).cpu().data.numpy()\n\n offset = offsets[i].cpu().data.numpy()\n offset = np.rollaxis(offset, axis=2, start=0)\n offset = np.rollaxis(offset, axis=2, start=0)\n \n instance = instances[i].cpu().data.numpy()\n instance = np.rollaxis(instance, axis=2, start=0)\n instance = np.rollaxis(instance, axis=2, start=0)\n\n # generate point and cluster\n raw_x, raw_y = generate_result(confidence, offset, instance, thresh)\n\n # eliminate fewer points\n in_x, in_y = eliminate_fewer_points(raw_x, raw_y)\n\n # sort points along y \n in_x, in_y = sort_along_y(in_x, in_y)\n in_x, in_y = eliminate_out(in_x, in_y, confidence, copy.deepcopy(image))\n in_x, in_y = sort_along_y(in_x, in_y)\n in_x, in_y = eliminate_fewer_points(in_x, in_y)\n in_x, in_y, location = sort_lane(in_x, in_y, 200)\n\n #for i in range(len(in_x)):\n # print(f'in_x[{i}] length = {len(in_x[i])}')\n \n if self.num_lanes == 0:\n self.num_lanes = len(in_x)\n else:\n self.num_lanes = self.num_lanes*0.8 + len(in_x)*0.2\n\n # Remove lane\n while (len(in_x) - self.num_lanes) > 0.5:\n min = len(in_x[0])\n flag = 0\n for i in range(len(in_x)-1):\n if len(in_x[i+1]) < min:\n min = len(in_x[i+1])\n flag = i+1\n in_x.remove(in_x[flag])\n in_y.remove(in_y[flag])\n\n result_image = draw_points(in_x, in_y, copy.deepcopy(image))\n\n out_x.append(in_x)\n out_y.append(in_y)\n out_images.append(result_image)\n\n return out_x, out_y, out_images\n \n def gen_video(self):\n height, width, layers = self.clips[0].shape\n size = (width,height)\n\n out = cv2.VideoWriter(os.path.join(self.output_video_path, 'video.avi'),cv2.VideoWriter_fourcc(*'DIVX'), self.fps, size)\n\n for clip in self.clips:\n # writing to a image array\n out.write(clip)\n out.release()", "import torch\nimport torch.nn as nn\nfrom options import opt\n\n\nclass HourglassModel(nn.Module):\n def __init__(self):\n super(HourglassModel, self).__init__()\n\n self.resizing = resize_layer(3, 128)\n\n #feature extraction\n self.layer1 = hourglass_block(128, 128)\n self.layer2 = hourglass_block(128, 128)\n\n\n def forward(self, inputs):\n #feature extraction\n out = self.resizing(inputs)\n result1, out = self.layer1(out)\n result2, out = self.layer2(out) \n\n return [result1, result2]\n \n\nclass resize_layer(nn.Module):\n def __init__(self, in_channels, out_channels, acti = True):\n super(resize_layer, self).__init__()\n self.conv = Conv2D_BatchNorm_Relu(in_channels, out_channels//2, 7, 3, 2)\n self.maxpool = nn.MaxPool2d(2, 2)\n self.re1 = bottleneck(out_channels//2, out_channels//2)\n self.re2 = bottleneck(out_channels//2, out_channels//2)\n self.re3 = bottleneck(out_channels//2, out_channels)\n\n def forward(self, inputs):\n outputs = self.conv(inputs)\n outputs = self.re1(outputs)\n outputs = self.maxpool(outputs)\n outputs = self.re2(outputs)\n outputs = self.maxpool(outputs)\n outputs = self.re3(outputs)\n\n return outputs \n\n\nclass hourglass_block(nn.Module):\n def __init__(self, in_channels, out_channels, acti = True, input_re=True):\n super(hourglass_block, 
self).__init__()\n self.layer1 = hourglass_same(in_channels, out_channels)\n self.re1 = bottleneck(out_channels, out_channels)\n self.re2 = bottleneck(out_channels, out_channels)\n self.re3 = bottleneck(1, out_channels) \n\n self.out_confidence = Output(out_channels, 1) \n self.out_offset = Output(out_channels, 2) \n self.out_instance = Output(out_channels, 4) \n self.input_re = input_re \n\n def forward(self, inputs):\n outputs = self.layer1(inputs)\n outputs = self.re1(outputs)\n\n out_confidence = self.out_confidence(outputs)\n out_offset = self.out_offset(outputs)\n out_instance = self.out_instance(outputs)\n\n out = out_confidence\n\n outputs = self.re2(outputs)\n out = self.re3(out)\n\n if self.input_re:\n outputs = outputs + out + inputs\n else:\n outputs = outputs + out\n\n return [out_confidence, out_offset, out_instance], outputs\n\n\nclass hourglass_same(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(hourglass_same, self).__init__()\n self.down1 = bottleneck_down(in_channels, out_channels)\n self.down2 = bottleneck_down(out_channels, out_channels)\n self.down3 = bottleneck_down(out_channels, out_channels)\n self.down4 = bottleneck_down(out_channels, out_channels)\n\n self.same1 = bottleneck(out_channels, out_channels)\n self.same2 = bottleneck(out_channels, out_channels)\n\n self.up2 = bottleneck_up(out_channels, out_channels)\n self.up3 = bottleneck_up(out_channels, out_channels)\n self.up4 = bottleneck_up(out_channels, out_channels)\n self.up5 = bottleneck_up(out_channels, out_channels)\n\n self.residual1 = bottleneck_down(in_channels, out_channels)\n self.residual2 = bottleneck_down(out_channels, out_channels)\n self.residual3 = bottleneck_down(out_channels, out_channels)\n self.residual4 = bottleneck_down(out_channels, out_channels)\n\n def forward(self, inputs):\n outputs1 = self.down1(inputs) # 512*256 -> 256*128\n outputs2 = self.down2(outputs1) # 256*128 -> 128*64\n outputs3 = self.down3(outputs2) # 128*64 -> 64*32\n outputs4 = self.down4(outputs3) # 64*32 -> 32*16\n\n outputs = self.same1(outputs4) # 16*8 -> 16*8\n outputs = self.same2(outputs) # 16*8 -> 16*8\n \n outputs = self.up2(outputs + self.residual4(outputs3)) # 32*16 -> 64*32\n outputs = self.up3(outputs + self.residual3(outputs2)) # 64*32 -> 128*64\n outputs = self.up4(outputs + self.residual2(outputs1)) # 128*64 -> 256*128\n outputs = self.up5(outputs + self.residual1(inputs)) # 256*128 -> 512*256\n\n return outputs \n\n\nclass Conv2D_BatchNorm_Relu(nn.Module):\n def __init__(self, in_channels, n_filters, k_size, padding, stride, bias=True, acti=True):\n super(Conv2D_BatchNorm_Relu, self).__init__()\n\n if acti:\n self.cbr_unit = nn.Sequential(nn.Conv2d(in_channels, n_filters, k_size, \n padding=padding, stride=stride, bias=bias),\n nn.BatchNorm2d(n_filters),\n nn.ReLU(inplace=True),)\n else:\n self.cbr_unit = nn.Conv2d(in_channels, n_filters, k_size, padding=padding, stride=stride, bias=bias)\n\n def forward(self, inputs):\n outputs = self.cbr_unit(inputs)\n return outputs\n\n\nclass bottleneck(nn.Module):\n def __init__(self, in_channels, out_channels, acti=True):\n super(bottleneck, self).__init__()\n self.acti = acti\n temp_channels = in_channels//4\n if in_channels < 4:\n temp_channels = in_channels\n self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels, 1, 0, 1)\n self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 1)\n self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1, acti = self.acti)\n\n self.residual = 
Conv2D_BatchNorm_Relu(in_channels, out_channels, 1, 0, 1)\n\n def forward(self, x):\n re = x\n\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n if not self.acti:\n return out\n\n re = self.residual(x)\n out = out + re\n\n return out\n\n\nclass bottleneck_down(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(bottleneck_down, self).__init__()\n temp_channels = in_channels//4\n if in_channels < 4:\n temp_channels = in_channels\n self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels, 1, 0, 1)\n self.conv2 = Conv2D_BatchNorm_Relu(temp_channels, temp_channels, 3, 1, 2)\n self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1)\n\n self.residual = Conv2D_BatchNorm_Relu(in_channels, out_channels, 3, 1, 2)\n\n def forward(self, x):\n re = x\n\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n\n re = self.residual(x)\n\n out = out + re\n\n return out\n\n\nclass bottleneck_up(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(bottleneck_up, self).__init__()\n temp_channels = in_channels//4\n if in_channels < 4:\n temp_channels = in_channels\n self.conv1 = Conv2D_BatchNorm_Relu(in_channels, temp_channels,1, 0, 1)\n self.conv2 = nn.Sequential( nn.ConvTranspose2d(temp_channels, temp_channels, 3, 2, 1, 1),\n nn.BatchNorm2d(temp_channels),\n nn.ReLU() )\n self.conv3 = Conv2D_BatchNorm_Relu(temp_channels, out_channels, 1, 0, 1)\n\n self.residual = nn.Sequential( nn.ConvTranspose2d(in_channels, out_channels, 3, 2, 1, 1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU() )\n\n def forward(self, x):\n re = x\n\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n \n re = self.residual(re)\n\n out = out + re\n\n return out\n\n\nclass Output(nn.Module):\n def __init__(self, in_size, out_size):\n super(Output, self).__init__()\n self.conv = bottleneck(in_size, out_size, acti=False)\n\n def forward(self, inputs):\n outputs = self.conv(inputs)\n return outputs\n\n" ]
[ [ "numpy.rollaxis", "torch.from_numpy", "torch.save", "numpy.array", "torch.autograd.Variable" ], [ "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
plopd/plop-msc-thesis
[ "c61fcf53c670b288ac8593790f9cc3f3abd50989", "c61fcf53c670b288ac8593790f9cc3f3abd50989" ]
[ "results/plot_prediction_problem.py", "results/fig_9_4.py" ]
[ "import argparse\nimport os\nfrom pathlib import Path\n\nimport gym\nimport gym_puddle # noqa f401\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nfrom utils.utils import minmax_normalization_ab\n\nmatplotlib.rcParams.update({\"font.size\": 24})\nsns.set()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--num_obs\", type=int, required=True)\nparser.add_argument(\"--env_id\", type=str, required=True)\nparser.add_argument(\"--discount_rate\", type=float, required=True)\nparser.add_argument(\"--policy_name\", type=str, required=True)\nparser.add_argument(\"--problem\", type=str, required=True)\n\nargs = parser.parse_args()\n\n\nsave_rootpath = Path(f\"{os.environ.get('SCRATCH')}\") / args.problem\nnum_obs = args.num_obs\nenv_id = args.env_id\ndiscount_rate = args.discount_rate\npolicy_name = args.policy_name\n\nobservations = np.load(save_rootpath / \"S.npy\")\nenv = gym.make(env_id)\n\n# Plot states S\nplt.figure()\nplt.scatter(observations[:, 0], observations[:, 1], alpha=0.15)\nplt.xlim((env.observation_space.low[0], env.observation_space.high[0]))\nplt.ylim((env.observation_space.low[1], env.observation_space.high[1]))\nplt.savefig((Path(save_rootpath) / \"observation_space\"))\n\n# Plot true values\nfilename = f\"true_values-discount_rate_{discount_rate}\".replace(\".\", \"_\")\ntrue_values = np.load(Path(save_rootpath) / f\"{filename}.npy\")\ncolors = minmax_normalization_ab(\n true_values,\n true_values.min(),\n true_values.max(),\n true_values.min(),\n true_values.max(),\n)\nplt.figure()\nsc = plt.scatter(observations[:, 0], observations[:, 1], c=colors, cmap=\"hot\")\nplt.xlim((env.observation_space.low[0], env.observation_space.high[0]))\nplt.ylim((env.observation_space.low[1], env.observation_space.high[1]))\nplt.colorbar(sc)\nplt.title(f\"{env_id} {policy_name} Prediction\")\nplt.tight_layout()\nplt.savefig(\n (\n Path(save_rootpath)\n / f\"true_values-discount_rate_{discount_rate}\".replace(\".\", \"_\")\n )\n)\n", "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom representations.representations import get_representation\n\n\ndef get_fig(name):\n num_states = 10000\n num_dims = 2\n num_features = 36\n states = np.random.uniform(0, 1, (num_states, num_dims))\n\n FR = get_representation(\n name=name,\n **{\n \"order\": 5,\n \"num_dims\": num_dims,\n \"min_x\": states.min(),\n \"max_x\": states.max(),\n }\n )\n\n features = np.array([FR[states[i]] for i in range(num_states)])\n\n fig = plt.figure(figsize=(25, 25))\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n\n for i in range(1, num_features + 1):\n ax = fig.add_subplot(int(np.sqrt(num_features)), int(np.sqrt(num_features)), i)\n ax.scatter(states[:, 0], states[:, 1], c=features[:, i - 1], cmap=\"bone\")\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n get_fig(\"F\")\n get_fig(\"P\")\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.rcParams.update", "numpy.load", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.tight_layout", "numpy.sqrt", "numpy.random.uniform", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rcarneva/pandas
[ "f76bad2d267b4891e4914ba2bfa59c2695315faa" ]
[ "pandas/io/parsers.py" ]
[ "\"\"\"\nModule contains tools for processing files into DataFrames or other objects\n\"\"\"\nfrom __future__ import print_function\nfrom pandas.compat import range, lrange, StringIO, lzip, zip, string_types, map\nfrom pandas import compat\nfrom collections import defaultdict\nimport re\nimport csv\nimport warnings\n\nimport numpy as np\n\nfrom pandas.core.index import Index, MultiIndex\nfrom pandas.core.frame import DataFrame\nimport datetime\nimport pandas.core.common as com\nfrom pandas.core.common import AbstractMethodError\nfrom pandas.core.config import get_option\nfrom pandas.io.date_converters import generic_parser\nfrom pandas.io.common import (get_filepath_or_buffer, _validate_header_arg,\n _get_handle, UnicodeReader, UTF8Recoder,\n BaseIterator)\nfrom pandas.tseries import tools\n\nfrom pandas.util.decorators import Appender\n\nimport pandas.lib as lib\nimport pandas.parser as _parser\n\n# common NA values\n# no longer excluding inf representations\n# '1.#INF','-1.#INF', '1.#INF000000',\n_NA_VALUES = set([\n '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',\n 'N/A', 'NA', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan', ''\n])\n\n\nclass ParserWarning(Warning):\n pass\n\n_parser_params = \"\"\"Also supports optionally iterating or breaking of the file\ninto chunks.\n\nAdditional help can be found in the `online docs for IO Tools\n<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.\n\nParameters\n----------\nfilepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \\\nobject with a read() method (such as a file handle or StringIO)\n The string could be a URL. Valid URL schemes include http, ftp, s3, and\n file. For file URLs, a host is expected. For instance, a local file could\n be file ://localhost/path/to/table.csv\n%s\ndelimiter : str, default None\n Alternative argument name for sep.\nheader : int or list of ints, default 'infer'\n Row number(s) to use as the column names, and the start of the data.\n Default behavior is as if set to 0 if no ``names`` passed, otherwise\n ``None``. Explicitly pass ``header=0`` to be able to replace existing\n names. The header can be a list of integers that specify row locations for\n a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not\n specified will be skipped (e.g. 2 in this example is skipped). Note that\n this parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so header=0 denotes the first line of data\n rather than the first line of the file.\nnames : array-like, default None\n List of column names to use. If file contains no header row, then you\n should explicitly pass header=None\nindex_col : int or sequence or False, default None\n Column to use as the row labels of the DataFrame. If a sequence is given, a\n MultiIndex is used. If you have a malformed file with delimiters at the end\n of each line, you might consider index_col=False to force pandas to _not_\n use the first column as the index (row names)\nusecols : array-like, default None\n Return a subset of the columns.\n Results in much faster parsing time and lower memory usage.\nsqueeze : boolean, default False\n If the parsed data only contains one column then return a Series\nprefix : str, default None\n Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\nmangle_dupe_cols : boolean, default True\n Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X'\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. 
{'a': np.float64, 'b': np.int32}\n (Unsupported with engine='python'). Use `str` or `object` to preserve and\n not interpret dtype.\n%s\nconverters : dict, default None\n Dict of functions for converting values in certain columns. Keys can either\n be integers or column labels\ntrue_values : list, default None\n Values to consider as True\nfalse_values : list, default None\n Values to consider as False\nskipinitialspace : boolean, default False\n Skip spaces after delimiter.\nskiprows : list-like or integer, default None\n Line numbers to skip (0-indexed) or number of lines to skip (int)\n at the start of the file\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with engine='c')\nnrows : int, default None\n Number of rows of file to read. Useful for reading pieces of large files\nna_values : str or list-like or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. By default the following values are interpreted as\n NaN: `'\"\"\" + \"'`, `'\".join(sorted(_NA_VALUES)) + \"\"\"'`.\nkeep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to.\nna_filter : boolean, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing na_filter=False can improve the performance\n of reading a large file\nverbose : boolean, default False\n Indicate number of NA values placed in non-numeric columns\nskip_blank_lines : boolean, default True\n If True, skip over blank lines rather than interpreting as NaN values\nparse_dates : boolean or list of ints or names or list of lists or dict, \\\ndefault False\n * boolean. If True -> try parsing the index.\n * list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result\n 'foo'\n Note: A fast-path exists for iso8601-formatted dates.\ninfer_datetime_format : boolean, default False\n If True and parse_dates is enabled for a column, attempt to infer\n the datetime format to speed up the processing\nkeep_date_col : boolean, default False\n If True and parse_dates specifies combining multiple columns then\n keep the original columns.\ndate_parser : function, default None\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. Pandas will try to call date_parser in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by parse_dates) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by parse_dates into a single array\n and pass that; and 3) call date_parser once for each row using one or more\n strings (corresponding to the columns defined by parse_dates) as arguments.\ndayfirst : boolean, default False\n DD/MM format dates, international and European format\niterator : boolean, default False\n Return TextFileReader object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, default None\n Return TextFileReader object for iteration. 
`See IO Tools docs for more\n information\n <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ on\n ``iterator`` and ``chunksize``.\ncompression : {'infer', 'gzip', 'bz2', None}, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer', then use gzip or\n bz2 if filepath_or_buffer is a string ending in '.gz' or '.bz2',\n respectively, and no decompression otherwise. Set to None for no\n decompression.\nthousands : str, default None\n Thousands separator\ndecimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European data).\nlineterminator : str (length 1), default None\n Character to break file into lines. Only valid with C parser.\nquotechar : str (length 1), optional\n The character used to denote the start and end of a quoted item. Quoted\n items can include the delimiter and it will be ignored.\nquoting : int or csv.QUOTE_* instance, default None\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\n Default (None) results in QUOTE_MINIMAL behavior.\nescapechar : str (length 1), default None\n One-character string used to escape delimiter when quoting is QUOTE_NONE.\ncomment : str, default None\n Indicates remainder of line should not be parsed. If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter `header` but not by\n `skiprows`. For example, if comment='#', parsing '#empty\\\\na,b,c\\\\n1,2,3'\n with `header=0` will result in 'a,b,c' being\n treated as the header.\nencoding : str, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python\n standard encodings\n <https://docs.python.org/3/library/codecs.html#standard-encodings>`_\ndialect : str or csv.Dialect instance, default None\n If None defaults to Excel dialect. Ignored if sep longer than 1 char\n See csv.Dialect documentation for more details\ntupleize_cols : boolean, default False\n Leave a list of tuples on columns as is (default is to convert to\n a Multi Index on the columns)\nerror_bad_lines : boolean, default True\n Lines with too many fields (e.g. a csv line with too many commas) will by\n default cause an exception to be raised, and no DataFrame will be returned.\n If False, then these \"bad lines\" will dropped from the DataFrame that is\n returned. (Only valid with C parser)\nwarn_bad_lines : boolean, default True\n If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n \"bad line\" will be output. (Only valid with C parser).\n\nReturns\n-------\nresult : DataFrame or TextParser\n\"\"\"\n\n# engine is not used in read_fwf() so is factored out of the shared docstring\n_engine_doc = \"\"\"engine : {'c', 'python'}, optional\n Parser engine to use. The C engine is faster while the python engine is\n currently more feature-complete.\"\"\"\n\n_sep_doc = \"\"\"sep : str, default {default}\n Delimiter to use. If sep is None, will try to automatically determine\n this. 
Regular expressions are accepted and will force use of the python\n parsing engine and will ignore quotes in the data.\"\"\"\n\n_read_csv_doc = \"\"\"\nRead CSV (comma-separated) file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"','\"), _engine_doc))\n\n_read_table_doc = \"\"\"\nRead general delimited file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"\\\\t (tab-stop)\"),\n _engine_doc))\n\n_fwf_widths = \"\"\"\\\ncolspecs : list of pairs (int, int) or 'infer'. optional\n A list of pairs (tuples) giving the extents of the fixed-width\n fields of each line as half-open intervals (i.e., [from, to[ ).\n String value 'infer' can be used to instruct the parser to try\n detecting the column specifications from the first 100 rows of\n the data (default='infer').\nwidths : list of ints. optional\n A list of field widths which can be used instead of 'colspecs' if\n the intervals are contiguous.\n\"\"\"\n\n_read_fwf_doc = \"\"\"\nRead a table of fixed-width formatted lines into DataFrame\n\n%s\n\nAlso, 'delimiter' is used to specify the filler character of the\nfields if it is not spaces (e.g., '~').\n\"\"\" % (_parser_params % (_fwf_widths, ''))\n\n\ndef _read(filepath_or_buffer, kwds):\n \"Generic reader of line files.\"\n encoding = kwds.get('encoding', None)\n skipfooter = kwds.pop('skipfooter', None)\n if skipfooter is not None:\n kwds['skip_footer'] = skipfooter\n\n # If the input could be a filename, check for a recognizable compression\n # extension. If we're reading from a URL, the `get_filepath_or_buffer`\n # will use header info to determine compression, so use what it finds in\n # that case.\n inferred_compression = kwds.get('compression')\n if inferred_compression == 'infer':\n if isinstance(filepath_or_buffer, compat.string_types):\n if filepath_or_buffer.endswith('.gz'):\n inferred_compression = 'gzip'\n elif filepath_or_buffer.endswith('.bz2'):\n inferred_compression = 'bz2'\n else:\n inferred_compression = None\n else:\n inferred_compression = None\n\n filepath_or_buffer, _, compression = get_filepath_or_buffer(\n filepath_or_buffer, encoding,\n compression=kwds.get('compression', None))\n kwds['compression'] = (inferred_compression if compression == 'infer'\n else compression)\n\n if kwds.get('date_parser', None) is not None:\n if isinstance(kwds['parse_dates'], bool):\n kwds['parse_dates'] = True\n\n # Extract some of the arguments (pass chunksize on).\n iterator = kwds.get('iterator', False)\n nrows = kwds.pop('nrows', None)\n chunksize = kwds.get('chunksize', None)\n\n # Create the parser.\n parser = TextFileReader(filepath_or_buffer, **kwds)\n\n if (nrows is not None) and (chunksize is not None):\n raise NotImplementedError(\"'nrows' and 'chunksize' can not be used\"\n \" together yet.\")\n elif nrows is not None:\n return parser.read(nrows)\n elif chunksize or iterator:\n return parser\n\n return parser.read()\n\n_parser_defaults = {\n 'delimiter': None,\n\n 'doublequote': True,\n 'escapechar': None,\n 'quotechar': '\"',\n 'quoting': csv.QUOTE_MINIMAL,\n 'skipinitialspace': False,\n 'lineterminator': None,\n\n 'header': 'infer',\n 'index_col': None,\n 'names': None,\n 'prefix': None,\n 'skiprows': None,\n 'na_values': None,\n 'true_values': None,\n 'false_values': None,\n 'skip_footer': 0,\n 'converters': None,\n\n 'keep_default_na': True,\n 'thousands': None,\n 'comment': None,\n\n # 'engine': 'c',\n 'parse_dates': False,\n 'keep_date_col': False,\n 'dayfirst': False,\n 'date_parser': None,\n\n 'usecols': None,\n\n 
# 'nrows': None,\n # 'iterator': False,\n 'chunksize': None,\n 'verbose': False,\n 'encoding': None,\n 'squeeze': False,\n 'compression': None,\n 'mangle_dupe_cols': True,\n 'tupleize_cols': False,\n 'infer_datetime_format': False,\n 'skip_blank_lines': True\n}\n\n\n_c_parser_defaults = {\n 'delim_whitespace': False,\n 'as_recarray': False,\n 'na_filter': True,\n 'compact_ints': False,\n 'use_unsigned': False,\n 'low_memory': True,\n 'memory_map': False,\n 'buffer_lines': None,\n 'error_bad_lines': True,\n 'warn_bad_lines': True,\n 'dtype': None,\n 'decimal': b'.',\n 'float_precision': None\n}\n\n_fwf_defaults = {\n 'colspecs': 'infer',\n 'widths': None,\n}\n\n_c_unsupported = set(['skip_footer'])\n_python_unsupported = set(_c_parser_defaults.keys())\n\n\ndef _make_parser_function(name, sep=','):\n\n default_sep = sep\n\n def parser_f(filepath_or_buffer,\n sep=sep,\n delimiter=None,\n\n # Column and Index Locations and Names\n header='infer',\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n\n # General Parsing Configuration\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n skipfooter=None,\n nrows=None,\n\n # NA and Missing Data Handling\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n\n # Datetime Handling\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n date_parser=None,\n dayfirst=False,\n\n # Iteration\n iterator=False,\n chunksize=None,\n\n # Quoting, Compression, and File Format\n compression='infer',\n thousands=None,\n decimal=b'.',\n lineterminator=None,\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=False,\n\n # Error Handling\n error_bad_lines=True,\n warn_bad_lines=True,\n\n # Deprecated\n skip_footer=0,\n\n # Internal\n doublequote=True,\n delim_whitespace=False,\n as_recarray=False,\n compact_ints=False,\n use_unsigned=False,\n low_memory=_c_parser_defaults['low_memory'],\n buffer_lines=None,\n memory_map=False,\n float_precision=None):\n\n # Alias sep -> delimiter.\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter is not default_sep:\n raise ValueError(\"Specified a delimiter with both sep and\"\n \" delim_whitespace=True; you can only\"\n \" specify one.\")\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'c'\n engine_specified = False\n\n kwds = dict(delimiter=delimiter,\n engine=engine,\n dialect=dialect,\n compression=compression,\n engine_specified=engine_specified,\n\n doublequote=doublequote,\n escapechar=escapechar,\n quotechar=quotechar,\n quoting=quoting,\n skipinitialspace=skipinitialspace,\n lineterminator=lineterminator,\n\n header=header,\n index_col=index_col,\n names=names,\n prefix=prefix,\n skiprows=skiprows,\n na_values=na_values,\n true_values=true_values,\n false_values=false_values,\n keep_default_na=keep_default_na,\n thousands=thousands,\n comment=comment,\n decimal=decimal,\n\n parse_dates=parse_dates,\n keep_date_col=keep_date_col,\n dayfirst=dayfirst,\n date_parser=date_parser,\n\n nrows=nrows,\n iterator=iterator,\n chunksize=chunksize,\n skipfooter=skipfooter or skip_footer,\n converters=converters,\n dtype=dtype,\n usecols=usecols,\n verbose=verbose,\n encoding=encoding,\n squeeze=squeeze,\n memory_map=memory_map,\n float_precision=float_precision,\n\n na_filter=na_filter,\n 
compact_ints=compact_ints,\n use_unsigned=use_unsigned,\n delim_whitespace=delim_whitespace,\n as_recarray=as_recarray,\n warn_bad_lines=warn_bad_lines,\n error_bad_lines=error_bad_lines,\n low_memory=low_memory,\n buffer_lines=buffer_lines,\n mangle_dupe_cols=mangle_dupe_cols,\n tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format,\n skip_blank_lines=skip_blank_lines)\n\n return _read(filepath_or_buffer, kwds)\n\n parser_f.__name__ = name\n\n return parser_f\n\nread_csv = _make_parser_function('read_csv', sep=',')\nread_csv = Appender(_read_csv_doc)(read_csv)\n\nread_table = _make_parser_function('read_table', sep='\\t')\nread_table = Appender(_read_table_doc)(read_table)\n\n\n@Appender(_read_fwf_doc)\ndef read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):\n # Check input arguments.\n if colspecs is None and widths is None:\n raise ValueError(\"Must specify either colspecs or widths\")\n elif colspecs not in (None, 'infer') and widths is not None:\n raise ValueError(\"You must specify only one of 'widths' and \"\n \"'colspecs'\")\n\n # Compute 'colspecs' from 'widths', if specified.\n if widths is not None:\n colspecs, col = [], 0\n for w in widths:\n colspecs.append((col, col + w))\n col += w\n\n kwds['colspecs'] = colspecs\n kwds['engine'] = 'python-fwf'\n return _read(filepath_or_buffer, kwds)\n\n\nclass TextFileReader(BaseIterator):\n \"\"\"\n\n Passed dialect overrides any of the related parser options\n\n \"\"\"\n\n def __init__(self, f, engine=None, **kwds):\n\n self.f = f\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'python'\n engine_specified = False\n\n self._engine_specified = kwds.get('engine_specified', engine_specified)\n\n if kwds.get('dialect') is not None:\n dialect = kwds['dialect']\n if dialect in csv.list_dialects():\n dialect = csv.get_dialect(dialect)\n kwds['delimiter'] = dialect.delimiter\n kwds['doublequote'] = dialect.doublequote\n kwds['escapechar'] = dialect.escapechar\n kwds['skipinitialspace'] = dialect.skipinitialspace\n kwds['quotechar'] = dialect.quotechar\n kwds['quoting'] = dialect.quoting\n\n if kwds.get('header', 'infer') == 'infer':\n kwds['header'] = 0 if kwds.get('names') is None else None\n\n self.orig_options = kwds\n\n # miscellanea\n self.engine = engine\n self._engine = None\n\n options = self._get_options_with_defaults(engine)\n\n self.chunksize = options.pop('chunksize', None)\n self.squeeze = options.pop('squeeze', False)\n\n # might mutate self.engine\n self.options, self.engine = self._clean_options(options, engine)\n if 'has_index_names' in kwds:\n self.options['has_index_names'] = kwds['has_index_names']\n\n self._make_engine(self.engine)\n\n def _get_options_with_defaults(self, engine):\n kwds = self.orig_options\n\n options = {}\n\n for argname, default in compat.iteritems(_parser_defaults):\n options[argname] = kwds.get(argname, default)\n\n for argname, default in compat.iteritems(_c_parser_defaults):\n if argname in kwds:\n value = kwds[argname]\n\n if engine != 'c' and value != default:\n raise ValueError('The %r option is not supported with the'\n ' %r engine' % (argname, engine))\n else:\n value = default\n options[argname] = value\n\n if engine == 'python-fwf':\n for argname, default in compat.iteritems(_fwf_defaults):\n options[argname] = kwds.get(argname, default)\n\n return options\n\n def _clean_options(self, options, engine):\n result = options.copy()\n\n engine_specified = self._engine_specified\n fallback_reason = None\n\n sep = options['delimiter']\n 
delim_whitespace = options['delim_whitespace']\n\n # C engine not supported yet\n if engine == 'c':\n if options['skip_footer'] > 0:\n fallback_reason = \"the 'c' engine does not support\"\\\n \" skip_footer\"\n engine = 'python'\n\n if sep is None and not delim_whitespace:\n if engine == 'c':\n fallback_reason = \"the 'c' engine does not support\"\\\n \" sep=None with delim_whitespace=False\"\n engine = 'python'\n elif sep is not None and len(sep) > 1:\n if engine == 'c' and sep == '\\s+':\n result['delim_whitespace'] = True\n del result['delimiter']\n elif engine not in ('python', 'python-fwf'):\n # wait until regex engine integrated\n fallback_reason = \"the 'c' engine does not support\"\\\n \" regex separators\"\n engine = 'python'\n\n if fallback_reason and engine_specified:\n raise ValueError(fallback_reason)\n\n if engine == 'c':\n for arg in _c_unsupported:\n del result[arg]\n\n if 'python' in engine:\n for arg in _python_unsupported:\n if fallback_reason and result[arg] != _c_parser_defaults[arg]:\n msg = (\"Falling back to the 'python' engine because\"\n \" {reason}, but this causes {option!r} to be\"\n \" ignored as it is not supported by the 'python'\"\n \" engine.\").format(reason=fallback_reason,\n option=arg)\n if arg == 'dtype':\n msg += \" (Note the 'converters' option provides\"\\\n \" similar functionality.)\"\n raise ValueError(msg)\n del result[arg]\n\n if fallback_reason:\n warnings.warn((\"Falling back to the 'python' engine because\"\n \" {0}; you can avoid this warning by specifying\"\n \" engine='python'.\").format(fallback_reason),\n ParserWarning, stacklevel=5)\n\n index_col = options['index_col']\n names = options['names']\n converters = options['converters']\n na_values = options['na_values']\n skiprows = options['skiprows']\n\n # really delete this one\n keep_default_na = result.pop('keep_default_na')\n\n _validate_header_arg(options['header'])\n\n if index_col is True:\n raise ValueError(\"The value of index_col couldn't be 'True'\")\n if _is_index_col(index_col):\n if not isinstance(index_col, (list, tuple, np.ndarray)):\n index_col = [index_col]\n result['index_col'] = index_col\n\n names = list(names) if names is not None else names\n\n # type conversion-related\n if converters is not None:\n if not isinstance(converters, dict):\n raise TypeError('Type converters must be a dict or'\n ' subclass, input was '\n 'a {0!r}'.format(type(converters).__name__))\n else:\n converters = {}\n\n # Converting values to NA\n na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)\n\n if com.is_integer(skiprows):\n skiprows = lrange(skiprows)\n skiprows = set() if skiprows is None else set(skiprows)\n\n # put stuff back\n result['names'] = names\n result['converters'] = converters\n result['na_values'] = na_values\n result['na_fvalues'] = na_fvalues\n result['skiprows'] = skiprows\n\n return result, engine\n\n def __next__(self):\n return self.get_chunk()\n\n def _make_engine(self, engine='c'):\n if engine == 'c':\n self._engine = CParserWrapper(self.f, **self.options)\n else:\n if engine == 'python':\n klass = PythonParser\n elif engine == 'python-fwf':\n klass = FixedWidthFieldParser\n self._engine = klass(self.f, **self.options)\n\n def _failover_to_python(self):\n raise AbstractMethodError(self)\n\n def read(self, nrows=None):\n if nrows is not None:\n if self.options.get('skip_footer'):\n raise ValueError('skip_footer not supported for iteration')\n\n ret = self._engine.read(nrows)\n\n if self.options.get('as_recarray'):\n return ret\n\n # May alter 
columns / col_dict\n index, columns, col_dict = self._create_index(ret)\n\n df = DataFrame(col_dict, columns=columns, index=index)\n\n if self.squeeze and len(df.columns) == 1:\n return df[df.columns[0]].copy()\n return df\n\n def _create_index(self, ret):\n index, columns, col_dict = ret\n return index, columns, col_dict\n\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n return self.read(nrows=size)\n\n\ndef _is_index_col(col):\n return col is not None and col is not False\n\n\nclass ParserBase(object):\n\n def __init__(self, kwds):\n self.names = kwds.get('names')\n self.orig_names = None\n self.prefix = kwds.pop('prefix', None)\n\n self.index_col = kwds.get('index_col', None)\n self.index_names = None\n self.col_names = None\n\n self.parse_dates = kwds.pop('parse_dates', False)\n self.date_parser = kwds.pop('date_parser', None)\n self.dayfirst = kwds.pop('dayfirst', False)\n self.keep_date_col = kwds.pop('keep_date_col', False)\n\n self.na_values = kwds.get('na_values')\n self.na_fvalues = kwds.get('na_fvalues')\n self.true_values = kwds.get('true_values')\n self.false_values = kwds.get('false_values')\n self.tupleize_cols = kwds.get('tupleize_cols', False)\n self.infer_datetime_format = kwds.pop('infer_datetime_format', False)\n\n self._date_conv = _make_date_converter(\n date_parser=self.date_parser,\n dayfirst=self.dayfirst,\n infer_datetime_format=self.infer_datetime_format\n )\n\n # validate header options for mi\n self.header = kwds.get('header')\n if isinstance(self.header, (list, tuple, np.ndarray)):\n if kwds.get('as_recarray'):\n raise ValueError(\"cannot specify as_recarray when \"\n \"specifying a multi-index header\")\n if kwds.get('usecols'):\n raise ValueError(\"cannot specify usecols when \"\n \"specifying a multi-index header\")\n if kwds.get('names'):\n raise ValueError(\"cannot specify names when \"\n \"specifying a multi-index header\")\n\n # validate index_col that only contains integers\n if self.index_col is not None:\n is_sequence = isinstance(self.index_col, (list, tuple,\n np.ndarray))\n if not (is_sequence and\n all(map(com.is_integer, self.index_col)) or\n com.is_integer(self.index_col)):\n raise ValueError(\"index_col must only contain row numbers \"\n \"when specifying a multi-index header\")\n\n self._name_processed = False\n\n self._first_chunk = True\n\n @property\n def _has_complex_date_col(self):\n return (isinstance(self.parse_dates, dict) or\n (isinstance(self.parse_dates, list) and\n len(self.parse_dates) > 0 and\n isinstance(self.parse_dates[0], list)))\n\n def _should_parse_dates(self, i):\n if isinstance(self.parse_dates, bool):\n return self.parse_dates\n else:\n name = self.index_names[i]\n j = self.index_col[i]\n\n if lib.isscalar(self.parse_dates):\n return (j == self.parse_dates) or (name == self.parse_dates)\n else:\n return (j in self.parse_dates) or (name in self.parse_dates)\n\n def _extract_multi_indexer_columns(self, header, index_names, col_names,\n passed_names=False):\n \"\"\" extract and return the names, index_names, col_names\n header is a list-of-lists returned from the parsers \"\"\"\n if len(header) < 2:\n return header[0], index_names, col_names, passed_names\n\n # the names are the tuples of the header that are not the index cols\n # 0 is the name of the index, assuming index_col is a list of column\n # numbers\n ic = self.index_col\n if ic is None:\n ic = []\n\n if not isinstance(ic, (list, tuple, np.ndarray)):\n ic = [ic]\n sic = set(ic)\n\n # clean the index_names\n index_names = 
header.pop(-1)\n index_names, names, index_col = _clean_index_names(index_names,\n self.index_col)\n\n # extract the columns\n field_count = len(header[0])\n\n def extract(r):\n return tuple([r[i] for i in range(field_count) if i not in sic])\n\n columns = lzip(*[extract(r) for r in header])\n names = ic + columns\n\n def tostr(x):\n return str(x) if not isinstance(x, compat.string_types) else x\n\n # if we find 'Unnamed' all of a single level, then our header was too\n # long\n for n in range(len(columns[0])):\n if all(['Unnamed' in tostr(c[n]) for c in columns]):\n raise _parser.CParserError(\n \"Passed header=[%s] are too many rows for this \"\n \"multi_index of columns\"\n % ','.join([str(x) for x in self.header])\n )\n\n # clean the column names (if we have an index_col)\n if len(ic):\n col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None\n for r in header]\n else:\n col_names = [None] * len(header)\n\n passed_names = True\n\n return names, index_names, col_names, passed_names\n\n def _maybe_make_multi_index_columns(self, columns, col_names=None):\n # possibly create a column mi here\n if (not self.tupleize_cols and len(columns) and\n not isinstance(columns, MultiIndex) and\n all([isinstance(c, tuple) for c in columns])):\n columns = MultiIndex.from_tuples(columns, names=col_names)\n return columns\n\n def _make_index(self, data, alldata, columns, indexnamerow=False):\n if not _is_index_col(self.index_col) or not self.index_col:\n index = None\n\n elif not self._has_complex_date_col:\n index = self._get_simple_index(alldata, columns)\n index = self._agg_index(index)\n\n elif self._has_complex_date_col:\n if not self._name_processed:\n (self.index_names, _,\n self.index_col) = _clean_index_names(list(columns),\n self.index_col)\n self._name_processed = True\n index = self._get_complex_date_index(data, columns)\n index = self._agg_index(index, try_parse_dates=False)\n\n # add names for the index\n if indexnamerow:\n coffset = len(indexnamerow) - len(columns)\n index = index.set_names(indexnamerow[:coffset])\n\n # maybe create a mi on the columns\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n return index, columns\n\n _implicit_index = False\n\n def _get_simple_index(self, data, columns):\n def ix(col):\n if not isinstance(col, compat.string_types):\n return col\n raise ValueError('Index %s invalid' % col)\n index = None\n\n to_remove = []\n index = []\n for idx in self.index_col:\n i = ix(idx)\n to_remove.append(i)\n index.append(data[i])\n\n # remove index items from content and columns, don't pop in\n # loop\n for i in reversed(sorted(to_remove)):\n data.pop(i)\n if not self._implicit_index:\n columns.pop(i)\n\n return index\n\n def _get_complex_date_index(self, data, col_names):\n def _get_name(icol):\n if isinstance(icol, compat.string_types):\n return icol\n\n if col_names is None:\n raise ValueError(('Must supply column order to use %s as '\n 'index') % str(icol))\n\n for i, c in enumerate(col_names):\n if i == icol:\n return c\n\n index = None\n\n to_remove = []\n index = []\n for idx in self.index_col:\n name = _get_name(idx)\n to_remove.append(name)\n index.append(data[name])\n\n # remove index items from content and columns, don't pop in\n # loop\n for c in reversed(sorted(to_remove)):\n data.pop(c)\n col_names.remove(c)\n\n return index\n\n def _agg_index(self, index, try_parse_dates=True):\n arrays = []\n for i, arr in enumerate(index):\n\n if (try_parse_dates and self._should_parse_dates(i)):\n arr = self._date_conv(arr)\n\n 
col_na_values = self.na_values\n col_na_fvalues = self.na_fvalues\n\n if isinstance(self.na_values, dict):\n col_name = self.index_names[i]\n if col_name is not None:\n col_na_values, col_na_fvalues = _get_na_values(\n col_name, self.na_values, self.na_fvalues)\n\n arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)\n arrays.append(arr)\n\n index = MultiIndex.from_arrays(arrays, names=self.index_names)\n\n return index\n\n def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,\n converters=None):\n result = {}\n for c, values in compat.iteritems(dct):\n conv_f = None if converters is None else converters.get(c, None)\n col_na_values, col_na_fvalues = _get_na_values(c, na_values,\n na_fvalues)\n coerce_type = True\n if conv_f is not None:\n try:\n values = lib.map_infer(values, conv_f)\n except ValueError:\n mask = lib.ismember(values, na_values).view(np.uint8)\n values = lib.map_infer_mask(values, conv_f, mask)\n coerce_type = False\n\n cvals, na_count = self._convert_types(\n values, set(col_na_values) | col_na_fvalues, coerce_type)\n result[c] = cvals\n if verbose and na_count:\n print('Filled %d NA values in column %s' % (na_count, str(c)))\n return result\n\n def _convert_types(self, values, na_values, try_num_bool=True):\n na_count = 0\n if issubclass(values.dtype.type, (np.number, np.bool_)):\n mask = lib.ismember(values, na_values)\n na_count = mask.sum()\n if na_count > 0:\n if com.is_integer_dtype(values):\n values = values.astype(np.float64)\n np.putmask(values, mask, np.nan)\n return values, na_count\n\n if try_num_bool:\n try:\n result = lib.maybe_convert_numeric(values, na_values, False)\n except Exception:\n result = values\n if values.dtype == np.object_:\n na_count = lib.sanitize_objects(result, na_values, False)\n else:\n result = values\n if values.dtype == np.object_:\n na_count = lib.sanitize_objects(values, na_values, False)\n\n if result.dtype == np.object_ and try_num_bool:\n result = lib.maybe_convert_bool(values,\n true_values=self.true_values,\n false_values=self.false_values)\n\n return result, na_count\n\n def _do_date_conversions(self, names, data):\n # returns data, columns\n if self.parse_dates is not None:\n data, names = _process_date_conversion(\n data, self._date_conv, self.parse_dates, self.index_col,\n self.index_names, names, keep_date_col=self.keep_date_col)\n\n return names, data\n\n\nclass CParserWrapper(ParserBase):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, src, **kwds):\n self.kwds = kwds\n kwds = kwds.copy()\n\n self.as_recarray = kwds.get('as_recarray', False)\n ParserBase.__init__(self, kwds)\n\n if 'utf-16' in (kwds.get('encoding') or ''):\n if isinstance(src, compat.string_types):\n src = open(src, 'rb')\n src = UTF8Recoder(src, kwds['encoding'])\n kwds['encoding'] = 'utf-8'\n\n # #2442\n kwds['allow_leading_cols'] = self.index_col is not False\n\n self._reader = _parser.TextReader(src, **kwds)\n\n # XXX\n self.usecols = self._reader.usecols\n\n passed_names = self.names is None\n\n if self._reader.header is None:\n self.names = None\n else:\n if len(self._reader.header) > 1:\n # we have a multi index in the columns\n self.names, self.index_names, self.col_names, passed_names = (\n self._extract_multi_indexer_columns(\n self._reader.header, self.index_names, self.col_names,\n passed_names\n )\n )\n else:\n self.names = list(self._reader.header[0])\n\n if self.names is None:\n if self.prefix:\n self.names = ['%s%d' % (self.prefix, i)\n for i in range(self._reader.table_width)]\n else:\n self.names = 
lrange(self._reader.table_width)\n\n # If the names were inferred (not passed by user) and usedcols is\n # defined, then ensure names refers to the used columns, not the\n # document's columns.\n if self.usecols and passed_names:\n col_indices = []\n for u in self.usecols:\n if isinstance(u, string_types):\n col_indices.append(self.names.index(u))\n else:\n col_indices.append(u)\n self.names = [n for i, n in enumerate(self.names)\n if i in col_indices]\n if len(self.names) < len(self.usecols):\n raise ValueError(\"Usecols do not match names.\")\n\n self._set_noconvert_columns()\n\n self.orig_names = self.names\n\n if not self._has_complex_date_col:\n if (self._reader.leading_cols == 0 and\n _is_index_col(self.index_col)):\n\n self._name_processed = True\n (index_names, self.names,\n self.index_col) = _clean_index_names(self.names,\n self.index_col)\n\n if self.index_names is None:\n self.index_names = index_names\n\n if self._reader.header is None and not passed_names:\n self.index_names = [None] * len(self.index_names)\n\n self._implicit_index = self._reader.leading_cols > 0\n\n def _set_noconvert_columns(self):\n names = self.names\n\n def _set(x):\n if com.is_integer(x):\n self._reader.set_noconvert(x)\n else:\n self._reader.set_noconvert(names.index(x))\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n def set_error_bad_lines(self, status):\n self._reader.set_error_bad_lines(int(status))\n\n def read(self, nrows=None):\n try:\n data = self._reader.read(nrows)\n except StopIteration:\n if self._first_chunk:\n self._first_chunk = False\n return _get_empty_meta(self.orig_names,\n self.index_col,\n self.index_names,\n dtype=self.kwds.get('dtype'))\n else:\n raise\n\n # Done with first read, next time raise StopIteration\n self._first_chunk = False\n\n if self.as_recarray:\n # what to do if there are leading columns?\n return data\n\n names = self.names\n\n if self._reader.leading_cols:\n if self._has_complex_date_col:\n raise NotImplementedError('file structure not yet supported')\n\n # implicit index, no index names\n arrays = []\n\n for i in range(self._reader.leading_cols):\n if self.index_col is None:\n values = data.pop(i)\n else:\n values = data.pop(self.index_col[i])\n\n values = self._maybe_parse_dates(values, i,\n try_parse_dates=True)\n arrays.append(values)\n\n index = MultiIndex.from_arrays(arrays)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # rename dict keys\n data = sorted(data.items())\n data = dict((k, v) for k, (i, v) in zip(names, data))\n\n names, data = self._do_date_conversions(names, data)\n\n else:\n # rename dict keys\n data = sorted(data.items())\n\n # ugh, mutation\n names = list(self.orig_names)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # columns as list\n alldata = [x[1] for x in data]\n\n data = dict((k, v) for k, (i, v) in zip(names, data))\n\n names, data = self._do_date_conversions(names, data)\n index, names = self._make_index(data, alldata, names)\n\n # maybe create a mi on the columns\n names = self._maybe_make_multi_index_columns(names, self.col_names)\n\n return index, names, data\n\n def _filter_usecols(self, names):\n # hackish\n if self.usecols is not None and len(names) != len(self.usecols):\n names = [name for i, name in 
enumerate(names)\n if i in self.usecols or name in self.usecols]\n return names\n\n def _get_index_names(self):\n names = list(self._reader.header[0])\n idx_names = None\n\n if self._reader.leading_cols == 0 and self.index_col is not None:\n (idx_names, names,\n self.index_col) = _clean_index_names(names, self.index_col)\n\n return names, idx_names\n\n def _maybe_parse_dates(self, values, index, try_parse_dates=True):\n if try_parse_dates and self._should_parse_dates(index):\n values = self._date_conv(values)\n return values\n\n\ndef TextParser(*args, **kwds):\n \"\"\"\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, default None\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. Prior\n rows will be discarded\n index_col : int or list, default None\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: boolean, default False\n True if the cols defined in index_col have an index name and are\n not in the header\n na_values : iterable, default None\n Custom NA values\n keep_default_na : bool, default True\n thousands : str, default None\n Thousands separator\n comment : str, default None\n Comment out remainder of line\n parse_dates : boolean, default False\n keep_date_col : boolean, default False\n date_parser : function, default None\n skiprows : list of integers\n Row numbers to skip\n skip_footer : int\n Number of line at bottom of file to skip\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : string, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : boolean, default False\n returns Series if only one column\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. 
The options are None for the ordinary converter,\n 'high' for the high-precision converter, and 'round_trip' for the\n round-trip converter.\n \"\"\"\n kwds['engine'] = 'python'\n return TextFileReader(*args, **kwds)\n\n\ndef count_empty_vals(vals):\n return sum([1 for v in vals if v == '' or v is None])\n\n\ndef _wrap_compressed(f, compression, encoding=None):\n \"\"\"wraps compressed fileobject in a decompressing fileobject\n NOTE: For all files in Python 3.2 and for bzip'd files under all Python\n versions, this means reading in the entire file and then re-wrapping it in\n StringIO.\n \"\"\"\n compression = compression.lower()\n encoding = encoding or get_option('display.encoding')\n\n if compression == 'gzip':\n import gzip\n\n f = gzip.GzipFile(fileobj=f)\n if compat.PY3:\n from io import TextIOWrapper\n\n f = TextIOWrapper(f)\n return f\n elif compression == 'bz2':\n import bz2\n\n if compat.PY3:\n f = bz2.open(f, 'rt', encoding=encoding)\n else:\n # Python 2's bz2 module can't take file objects, so have to\n # run through decompress manually\n data = bz2.decompress(f.read())\n f = StringIO(data)\n return f\n else:\n raise ValueError('do not recognize compression method %s'\n % compression)\n\n\nclass PythonParser(ParserBase):\n\n def __init__(self, f, **kwds):\n \"\"\"\n Workhorse function for processing nested list into DataFrame\n\n Should be replaced by np.genfromtxt eventually?\n \"\"\"\n ParserBase.__init__(self, kwds)\n\n self.data = None\n self.buf = []\n self.pos = 0\n self.line_pos = 0\n\n self.encoding = kwds['encoding']\n self.compression = kwds['compression']\n self.skiprows = kwds['skiprows']\n\n self.skip_footer = kwds['skip_footer']\n self.delimiter = kwds['delimiter']\n\n self.quotechar = kwds['quotechar']\n self.escapechar = kwds['escapechar']\n self.doublequote = kwds['doublequote']\n self.skipinitialspace = kwds['skipinitialspace']\n self.lineterminator = kwds['lineterminator']\n self.quoting = kwds['quoting']\n self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)\n self.usecols = kwds['usecols']\n self.skip_blank_lines = kwds['skip_blank_lines']\n\n self.names_passed = kwds['names'] or None\n\n self.has_index_names = False\n if 'has_index_names' in kwds:\n self.has_index_names = kwds['has_index_names']\n\n self.verbose = kwds['verbose']\n self.converters = kwds['converters']\n\n self.thousands = kwds['thousands']\n self.comment = kwds['comment']\n self._comment_lines = []\n\n if isinstance(f, compat.string_types):\n f = _get_handle(f, 'r', encoding=self.encoding,\n compression=self.compression)\n elif self.compression:\n f = _wrap_compressed(f, self.compression, self.encoding)\n # in Python 3, convert BytesIO or fileobjects passed with an encoding\n elif compat.PY3 and isinstance(f, compat.BytesIO):\n from io import TextIOWrapper\n\n f = TextIOWrapper(f, encoding=self.encoding)\n\n # Set self.data to something that can read lines.\n if hasattr(f, 'readline'):\n self._make_reader(f)\n else:\n self.data = f\n\n # Get columns in two steps: infer from data, then\n # infer column indices from self.usecols if is is specified.\n self._col_indices = None\n self.columns, self.num_original_columns = self._infer_columns()\n\n # Now self.columns has the set of columns that we will process.\n # The original set is stored in self.original_columns.\n if len(self.columns) > 1:\n # we are processing a multi index column\n self.columns, self.index_names, self.col_names, _ = (\n self._extract_multi_indexer_columns(\n self.columns, self.index_names, self.col_names\n )\n )\n # 
Update list of original names to include all indices.\n self.num_original_columns = len(self.columns)\n else:\n self.columns = self.columns[0]\n\n # get popped off for index\n self.orig_names = list(self.columns)\n\n # needs to be cleaned/refactored\n # multiple date column thing turning into a real spaghetti factory\n\n if not self._has_complex_date_col:\n (index_names, self.orig_names, self.columns) = (\n self._get_index_name(self.columns))\n self._name_processed = True\n if self.index_names is None:\n self.index_names = index_names\n\n if self.parse_dates:\n self._no_thousands_columns = self._set_no_thousands_columns()\n else:\n self._no_thousands_columns = None\n\n def _set_no_thousands_columns(self):\n # Create a set of column ids that are not to be stripped of thousands\n # operators.\n noconvert_columns = set()\n\n def _set(x):\n if com.is_integer(x):\n noconvert_columns.add(x)\n else:\n noconvert_columns.add(self.columns.index(x))\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n return noconvert_columns\n\n def _make_reader(self, f):\n sep = self.delimiter\n\n if sep is None or len(sep) == 1:\n if self.lineterminator:\n raise ValueError('Custom line terminators not supported in '\n 'python parser (yet)')\n\n class MyDialect(csv.Dialect):\n delimiter = self.delimiter\n quotechar = self.quotechar\n escapechar = self.escapechar\n doublequote = self.doublequote\n skipinitialspace = self.skipinitialspace\n quoting = self.quoting\n lineterminator = '\\n'\n\n dia = MyDialect\n\n sniff_sep = True\n\n if sep is not None:\n sniff_sep = False\n dia.delimiter = sep\n # attempt to sniff the delimiter\n if sniff_sep:\n line = f.readline()\n while self.pos in self.skiprows:\n self.pos += 1\n line = f.readline()\n\n line = self._check_comments([line])[0]\n\n self.pos += 1\n self.line_pos += 1\n sniffed = csv.Sniffer().sniff(line)\n dia.delimiter = sniffed.delimiter\n if self.encoding is not None:\n self.buf.extend(list(\n UnicodeReader(StringIO(line),\n dialect=dia,\n encoding=self.encoding)))\n else:\n self.buf.extend(list(csv.reader(StringIO(line),\n dialect=dia)))\n\n if self.encoding is not None:\n reader = UnicodeReader(f, dialect=dia,\n encoding=self.encoding,\n strict=True)\n else:\n reader = csv.reader(f, dialect=dia,\n strict=True)\n\n else:\n def _read():\n line = next(f)\n pat = re.compile(sep)\n yield pat.split(line.strip())\n for line in f:\n yield pat.split(line.strip())\n reader = _read()\n\n self.data = reader\n\n def read(self, rows=None):\n try:\n content = self._get_lines(rows)\n except StopIteration:\n if self._first_chunk:\n content = []\n else:\n raise\n\n # done with first read, next time raise StopIteration\n self._first_chunk = False\n\n columns = list(self.orig_names)\n if not len(content): # pragma: no cover\n # DataFrame with the right metadata, even though it's length 0\n return _get_empty_meta(self.orig_names,\n self.index_col,\n self.index_names)\n\n # handle new style for names in index\n count_empty_content_vals = count_empty_vals(content[0])\n indexnamerow = None\n if self.has_index_names and count_empty_content_vals == len(columns):\n indexnamerow = content[0]\n content = content[1:]\n\n alldata = self._rows_to_cols(content)\n data = self._exclude_implicit_index(alldata)\n\n columns, data = 
self._do_date_conversions(self.columns, data)\n\n data = self._convert_data(data)\n index, columns = self._make_index(data, alldata, columns, indexnamerow)\n\n return index, columns, data\n\n def _exclude_implicit_index(self, alldata):\n\n if self._implicit_index:\n excl_indices = self.index_col\n\n data = {}\n offset = 0\n for i, col in enumerate(self.orig_names):\n while i + offset in excl_indices:\n offset += 1\n data[col] = alldata[i + offset]\n else:\n data = dict((k, v) for k, v in zip(self.orig_names, alldata))\n\n return data\n\n # legacy\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n return self.read(nrows=size)\n\n def _convert_data(self, data):\n # apply converters\n clean_conv = {}\n\n for col, f in compat.iteritems(self.converters):\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n clean_conv[col] = f\n\n return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues,\n self.verbose, clean_conv)\n\n def _infer_columns(self):\n names = self.names\n num_original_columns = 0\n clear_buffer = True\n if self.header is not None:\n header = self.header\n\n # we have a mi columns, so read an extra line\n if isinstance(header, (list, tuple, np.ndarray)):\n have_mi_columns = True\n header = list(header) + [header[-1] + 1]\n else:\n have_mi_columns = False\n header = [header]\n\n columns = []\n for level, hr in enumerate(header):\n line = self._buffered_line()\n\n while self.line_pos <= hr:\n line = self._next_line()\n\n unnamed_count = 0\n this_columns = []\n for i, c in enumerate(line):\n if c == '':\n if have_mi_columns:\n this_columns.append('Unnamed: %d_level_%d'\n % (i, level))\n else:\n this_columns.append('Unnamed: %d' % i)\n unnamed_count += 1\n else:\n this_columns.append(c)\n\n if not have_mi_columns and self.mangle_dupe_cols:\n counts = {}\n for i, col in enumerate(this_columns):\n cur_count = counts.get(col, 0)\n if cur_count > 0:\n this_columns[i] = '%s.%d' % (col, cur_count)\n counts[col] = cur_count + 1\n elif have_mi_columns:\n\n # if we have grabbed an extra line, but its not in our\n # format so save in the buffer, and create an blank extra\n # line for the rest of the parsing code\n if hr == header[-1]:\n lc = len(this_columns)\n ic = (len(self.index_col)\n if self.index_col is not None else 0)\n if lc != unnamed_count and lc - ic > unnamed_count:\n clear_buffer = False\n this_columns = [None] * lc\n self.buf = [self.buf[-1]]\n\n columns.append(this_columns)\n if len(columns) == 1:\n num_original_columns = len(this_columns)\n\n if clear_buffer:\n self._clear_buffer()\n\n if names is not None:\n if ((self.usecols is not None and\n len(names) != len(self.usecols)) or\n (self.usecols is None and\n len(names) != len(columns[0]))):\n raise ValueError('Number of passed names did not match '\n 'number of header fields in the file')\n if len(columns) > 1:\n raise TypeError('Cannot pass names with multi-index '\n 'columns')\n\n if self.usecols is not None:\n # Set _use_cols. 
We don't store columns because they are\n # overwritten.\n self._handle_usecols(columns, names)\n else:\n self._col_indices = None\n num_original_columns = len(names)\n columns = [names]\n else:\n columns = self._handle_usecols(columns, columns[0])\n else:\n # header is None\n line = self._buffered_line()\n ncols = len(line)\n num_original_columns = ncols\n if not names:\n if self.prefix:\n columns = [['%s%d' % (self.prefix, i)\n for i in range(ncols)]]\n else:\n columns = [lrange(ncols)]\n columns = self._handle_usecols(columns, columns[0])\n else:\n if self.usecols is None or len(names) == num_original_columns:\n columns = self._handle_usecols([names], names)\n num_original_columns = len(names)\n else:\n if self.usecols and len(names) != len(self.usecols):\n raise ValueError(\n 'Number of passed names did not match number of '\n 'header fields in the file'\n )\n # Ignore output but set used columns.\n self._handle_usecols([names], names)\n columns = [names]\n num_original_columns = ncols\n\n return columns, num_original_columns\n\n def _handle_usecols(self, columns, usecols_key):\n \"\"\"\n Sets self._col_indices\n\n usecols_key is used if there are string usecols.\n \"\"\"\n if self.usecols is not None:\n if any([isinstance(u, string_types) for u in self.usecols]):\n if len(columns) > 1:\n raise ValueError(\"If using multiple headers, usecols must \"\n \"be integers.\")\n col_indices = []\n for u in self.usecols:\n if isinstance(u, string_types):\n col_indices.append(usecols_key.index(u))\n else:\n col_indices.append(u)\n else:\n col_indices = self.usecols\n\n columns = [[n for i, n in enumerate(column) if i in col_indices]\n for column in columns]\n self._col_indices = col_indices\n return columns\n\n def _buffered_line(self):\n \"\"\"\n Return a line from buffer, filling buffer if required.\n \"\"\"\n if len(self.buf) > 0:\n return self.buf[0]\n else:\n return self._next_line()\n\n def _empty(self, line):\n return not line or all(not x for x in line)\n\n def _next_line(self):\n if isinstance(self.data, list):\n while self.pos in self.skiprows:\n self.pos += 1\n\n while True:\n try:\n line = self._check_comments([self.data[self.pos]])[0]\n self.pos += 1\n # either uncommented or blank to begin with\n if not self.skip_blank_lines and (self._empty(self.data[\n self.pos - 1]) or line):\n break\n elif self.skip_blank_lines:\n ret = self._check_empty([line])\n if ret:\n line = ret[0]\n break\n except IndexError:\n raise StopIteration\n else:\n while self.pos in self.skiprows:\n self.pos += 1\n next(self.data)\n\n while True:\n orig_line = next(self.data)\n line = self._check_comments([orig_line])[0]\n self.pos += 1\n if (not self.skip_blank_lines and\n (self._empty(orig_line) or line)):\n break\n elif self.skip_blank_lines:\n ret = self._check_empty([line])\n if ret:\n line = ret[0]\n break\n\n self.line_pos += 1\n self.buf.append(line)\n return line\n\n def _check_comments(self, lines):\n if self.comment is None:\n return lines\n ret = []\n for l in lines:\n rl = []\n for x in l:\n if (not isinstance(x, compat.string_types) or\n self.comment not in x):\n rl.append(x)\n else:\n x = x[:x.find(self.comment)]\n if len(x) > 0:\n rl.append(x)\n break\n ret.append(rl)\n return ret\n\n def _check_empty(self, lines):\n ret = []\n for l in lines:\n # Remove empty lines and lines with only one whitespace value\n if (len(l) > 1 or len(l) == 1 and\n (not isinstance(l[0], compat.string_types) or\n l[0].strip())):\n ret.append(l)\n return ret\n\n def _check_thousands(self, lines):\n if self.thousands is 
None:\n return lines\n nonnum = re.compile('[^-^0-9^%s^.]+' % self.thousands)\n ret = []\n for l in lines:\n rl = []\n for i, x in enumerate(l):\n if (not isinstance(x, compat.string_types) or\n self.thousands not in x or\n (self._no_thousands_columns and\n i in self._no_thousands_columns) or\n nonnum.search(x.strip())):\n rl.append(x)\n else:\n rl.append(x.replace(self.thousands, ''))\n ret.append(rl)\n return ret\n\n def _clear_buffer(self):\n self.buf = []\n\n _implicit_index = False\n\n def _get_index_name(self, columns):\n \"\"\"\n Try several cases to get lines:\n\n 0) There are headers on row 0 and row 1 and their\n total summed lengths equals the length of the next line.\n Treat row 0 as columns and row 1 as indices\n 1) Look for implicit index: there are more columns\n on row 1 than row 0. If this is true, assume that row\n 1 lists index columns and row 0 lists normal columns.\n 2) Get index from the columns if it was listed.\n \"\"\"\n orig_names = list(columns)\n columns = list(columns)\n\n try:\n line = self._next_line()\n except StopIteration:\n line = None\n\n try:\n next_line = self._next_line()\n except StopIteration:\n next_line = None\n\n # implicitly index_col=0 b/c 1 fewer column names\n implicit_first_cols = 0\n if line is not None:\n # leave it 0, #2442\n # Case 1\n if self.index_col is not False:\n implicit_first_cols = len(line) - self.num_original_columns\n\n # Case 0\n if next_line is not None:\n if len(next_line) == len(line) + self.num_original_columns:\n # column and index names on diff rows\n self.index_col = lrange(len(line))\n self.buf = self.buf[1:]\n\n for c in reversed(line):\n columns.insert(0, c)\n\n # Update list of original names to include all indices.\n orig_names = list(columns)\n self.num_original_columns = len(columns)\n return line, orig_names, columns\n\n if implicit_first_cols > 0:\n # Case 1\n self._implicit_index = True\n if self.index_col is None:\n self.index_col = lrange(implicit_first_cols)\n\n index_name = None\n\n else:\n # Case 2\n (index_name, columns_,\n self.index_col) = _clean_index_names(columns, self.index_col)\n\n return index_name, orig_names, columns\n\n def _rows_to_cols(self, content):\n zipped_content = list(lib.to_object_array(content).T)\n\n col_len = self.num_original_columns\n zip_len = len(zipped_content)\n\n if self._implicit_index:\n col_len += len(self.index_col)\n\n if self.skip_footer < 0:\n raise ValueError('skip footer cannot be negative')\n\n # Loop through rows to verify lengths are correct.\n if col_len != zip_len and self.index_col is not False:\n i = 0\n for (i, l) in enumerate(content):\n if len(l) != col_len:\n break\n\n footers = 0\n if self.skip_footer:\n footers = self.skip_footer\n\n row_num = self.pos - (len(content) - i + footers)\n\n msg = ('Expected %d fields in line %d, saw %d' %\n (col_len, row_num + 1, zip_len))\n raise ValueError(msg)\n\n if self.usecols:\n if self._implicit_index:\n zipped_content = [\n a for i, a in enumerate(zipped_content)\n if (i < len(self.index_col) or\n i - len(self.index_col) in self._col_indices)]\n else:\n zipped_content = [a for i, a in enumerate(zipped_content)\n if i in self._col_indices]\n return zipped_content\n\n def _get_lines(self, rows=None):\n source = self.data\n lines = self.buf\n new_rows = None\n\n # already fetched some number\n if rows is not None:\n # we already have the lines in the buffer\n if len(self.buf) >= rows:\n new_rows, self.buf = self.buf[:rows], self.buf[rows:]\n\n # need some lines\n else:\n rows -= len(self.buf)\n\n if new_rows is 
None:\n if isinstance(source, list):\n if self.pos > len(source):\n raise StopIteration\n if rows is None:\n new_rows = source[self.pos:]\n new_pos = len(source)\n else:\n new_rows = source[self.pos:self.pos + rows]\n new_pos = self.pos + rows\n\n # Check for stop rows. n.b.: self.skiprows is a set.\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if i + self.pos not in self.skiprows]\n\n lines.extend(new_rows)\n self.pos = new_pos\n\n else:\n new_rows = []\n try:\n if rows is not None:\n for _ in range(rows):\n new_rows.append(next(source))\n lines.extend(new_rows)\n else:\n rows = 0\n while True:\n try:\n new_rows.append(next(source))\n rows += 1\n except csv.Error as inst:\n if 'newline inside string' in str(inst):\n row_num = str(self.pos + rows)\n msg = ('EOF inside string starting with '\n 'line ' + row_num)\n raise Exception(msg)\n raise\n except StopIteration:\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if self.pos + i not in self.skiprows]\n lines.extend(new_rows)\n if len(lines) == 0:\n raise\n self.pos += len(new_rows)\n\n self.buf = []\n else:\n lines = new_rows\n\n if self.skip_footer:\n lines = lines[:-self.skip_footer]\n\n lines = self._check_comments(lines)\n if self.skip_blank_lines:\n lines = self._check_empty(lines)\n return self._check_thousands(lines)\n\n\ndef _make_date_converter(date_parser=None, dayfirst=False,\n infer_datetime_format=False):\n def converter(*date_cols):\n if date_parser is None:\n strs = _concat_date_cols(date_cols)\n\n try:\n return tools._to_datetime(\n com._ensure_object(strs),\n utc=None,\n box=False,\n dayfirst=dayfirst,\n errors='ignore',\n infer_datetime_format=infer_datetime_format\n )\n except:\n return tools.to_datetime(\n lib.try_parse_dates(strs, dayfirst=dayfirst))\n else:\n try:\n result = tools.to_datetime(\n date_parser(*date_cols), errors='ignore')\n if isinstance(result, datetime.datetime):\n raise Exception('scalar parser')\n return result\n except Exception:\n try:\n return tools.to_datetime(\n lib.try_parse_dates(_concat_date_cols(date_cols),\n parser=date_parser,\n dayfirst=dayfirst),\n errors='ignore')\n except Exception:\n return generic_parser(date_parser, *date_cols)\n\n return converter\n\n\ndef _process_date_conversion(data_dict, converter, parse_spec,\n index_col, index_names, columns,\n keep_date_col=False):\n def _isindex(colspec):\n return ((isinstance(index_col, list) and\n colspec in index_col) or\n (isinstance(index_names, list) and\n colspec in index_names))\n\n new_cols = []\n new_data = {}\n\n orig_names = columns\n columns = list(columns)\n\n date_cols = set()\n\n if parse_spec is None or isinstance(parse_spec, bool):\n return data_dict, columns\n\n if isinstance(parse_spec, list):\n # list of column lists\n for colspec in parse_spec:\n if lib.isscalar(colspec):\n if isinstance(colspec, int) and colspec not in data_dict:\n colspec = orig_names[colspec]\n if _isindex(colspec):\n continue\n data_dict[colspec] = converter(data_dict[colspec])\n else:\n new_name, col, old_names = _try_convert_dates(\n converter, colspec, data_dict, orig_names)\n if new_name in data_dict:\n raise ValueError('New date column already in dict %s' %\n new_name)\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n elif isinstance(parse_spec, dict):\n # dict of new name to column list\n for new_name, colspec in compat.iteritems(parse_spec):\n if new_name in data_dict:\n raise ValueError('Date column %s already in dict' %\n new_name)\n\n _, col, 
old_names = _try_convert_dates(converter, colspec,\n data_dict, orig_names)\n\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n data_dict.update(new_data)\n new_cols.extend(columns)\n\n if not keep_date_col:\n for c in list(date_cols):\n data_dict.pop(c)\n new_cols.remove(c)\n\n return data_dict, new_cols\n\n\ndef _try_convert_dates(parser, colspec, data_dict, columns):\n colset = set(columns)\n colnames = []\n\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int) and c not in columns:\n colnames.append(str(columns[c]))\n else:\n colnames.append(c)\n\n new_name = '_'.join([str(x) for x in colnames])\n to_parse = [data_dict[c] for c in colnames if c in data_dict]\n\n new_col = parser(*to_parse)\n return new_name, new_col, colnames\n\n\ndef _clean_na_values(na_values, keep_default_na=True):\n\n if na_values is None:\n if keep_default_na:\n na_values = _NA_VALUES\n else:\n na_values = []\n na_fvalues = set()\n elif isinstance(na_values, dict):\n if keep_default_na:\n for k, v in compat.iteritems(na_values):\n v = set(list(v)) | _NA_VALUES\n na_values[k] = v\n na_fvalues = dict([\n (k, _floatify_na_values(v)) for k, v in na_values.items() # noqa\n ])\n else:\n if not com.is_list_like(na_values):\n na_values = [na_values]\n na_values = _stringify_na_values(na_values)\n if keep_default_na:\n na_values = na_values | _NA_VALUES\n\n na_fvalues = _floatify_na_values(na_values)\n\n return na_values, na_fvalues\n\n\ndef _clean_index_names(columns, index_col):\n if not _is_index_col(index_col):\n return None, columns, index_col\n\n columns = list(columns)\n\n cp_cols = list(columns)\n index_names = []\n\n # don't mutate\n index_col = list(index_col)\n\n for i, c in enumerate(index_col):\n if isinstance(c, compat.string_types):\n index_names.append(c)\n for j, name in enumerate(cp_cols):\n if name == c:\n index_col[i] = j\n columns.remove(name)\n break\n else:\n name = cp_cols[c]\n columns.remove(name)\n index_names.append(name)\n\n # hack\n if isinstance(index_names[0], compat.string_types)\\\n and 'Unnamed' in index_names[0]:\n index_names[0] = None\n\n return index_names, columns, index_col\n\n\ndef _get_empty_meta(columns, index_col, index_names, dtype=None):\n columns = list(columns)\n\n if dtype is None:\n dtype = {}\n else:\n if not isinstance(dtype, dict):\n dtype = defaultdict(lambda: dtype)\n # Convert column indexes to column names.\n dtype = dict((columns[k] if com.is_integer(k) else k, v)\n for k, v in compat.iteritems(dtype))\n\n if index_col is None or index_col is False:\n index = Index([])\n else:\n index = [np.empty(0, dtype=dtype.get(index_name, np.object))\n for index_name in index_names]\n index = MultiIndex.from_arrays(index, names=index_names)\n index_col.sort()\n for i, n in enumerate(index_col):\n columns.pop(n - i)\n\n col_dict = dict((col_name,\n np.empty(0, dtype=dtype.get(col_name, np.object)))\n for col_name in columns)\n\n return index, columns, col_dict\n\n\ndef _floatify_na_values(na_values):\n # create float versions of the na_values\n result = set()\n for v in na_values:\n try:\n v = float(v)\n if not np.isnan(v):\n result.add(v)\n except:\n pass\n return result\n\n\ndef _stringify_na_values(na_values):\n \"\"\" return a stringified and numeric for these values \"\"\"\n result = []\n for x in na_values:\n result.append(str(x))\n result.append(x)\n try:\n v = float(x)\n\n # we are like 999 here\n if v == int(v):\n v = int(v)\n result.append(\"%s.0\" % v)\n result.append(str(v))\n\n result.append(v)\n 
except:\n pass\n try:\n result.append(int(x))\n except:\n pass\n return set(result)\n\n\ndef _get_na_values(col, na_values, na_fvalues):\n if isinstance(na_values, dict):\n if col in na_values:\n return na_values[col], na_fvalues[col]\n else:\n return _NA_VALUES, set()\n else:\n return na_values, na_fvalues\n\n\ndef _get_col_names(colspec, columns):\n colset = set(columns)\n colnames = []\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int):\n colnames.append(columns[c])\n return colnames\n\n\ndef _concat_date_cols(date_cols):\n if len(date_cols) == 1:\n if compat.PY3:\n return np.array([compat.text_type(x) for x in date_cols[0]],\n dtype=object)\n else:\n return np.array([\n str(x) if not isinstance(x, compat.string_types) else x\n for x in date_cols[0]\n ], dtype=object)\n\n rs = np.array([' '.join([compat.text_type(y) for y in x])\n for x in zip(*date_cols)], dtype=object)\n return rs\n\n\nclass FixedWidthReader(BaseIterator):\n \"\"\"\n A reader of fixed-width lines.\n \"\"\"\n\n def __init__(self, f, colspecs, delimiter, comment):\n self.f = f\n self.buffer = None\n self.delimiter = '\\r\\n' + delimiter if delimiter else '\\n\\r\\t '\n self.comment = comment\n if colspecs == 'infer':\n self.colspecs = self.detect_colspecs()\n else:\n self.colspecs = colspecs\n\n if not isinstance(self.colspecs, (tuple, list)):\n raise TypeError(\"column specifications must be a list or tuple, \"\n \"input was a %r\" % type(colspecs).__name__)\n\n for colspec in self.colspecs:\n\n if not (isinstance(colspec, (tuple, list)) and\n len(colspec) == 2 and\n isinstance(colspec[0], (int, np.integer, type(None))) and\n isinstance(colspec[1], (int, np.integer, type(None)))):\n raise TypeError('Each column specification must be '\n '2 element tuple or list of integers')\n\n def get_rows(self, n):\n rows = []\n for i, row in enumerate(self.f, 1):\n rows.append(row)\n if i >= n:\n break\n self.buffer = iter(rows)\n return rows\n\n def detect_colspecs(self, n=100):\n # Regex escape the delimiters\n delimiters = ''.join([r'\\%s' % x for x in self.delimiter])\n pattern = re.compile('([^%s]+)' % delimiters)\n rows = self.get_rows(n)\n max_len = max(map(len, rows))\n mask = np.zeros(max_len + 1, dtype=int)\n if self.comment is not None:\n rows = [row.partition(self.comment)[0] for row in rows]\n for row in rows:\n for m in pattern.finditer(row):\n mask[m.start():m.end()] = 1\n shifted = np.roll(mask, 1)\n shifted[0] = 0\n edges = np.where((mask ^ shifted) == 1)[0]\n return list(zip(edges[::2], edges[1::2]))\n\n def __next__(self):\n if self.buffer is not None:\n try:\n line = next(self.buffer)\n except StopIteration:\n self.buffer = None\n line = next(self.f)\n else:\n line = next(self.f)\n # Note: 'colspecs' is a sequence of half-open intervals.\n return [line[fromm:to].strip(self.delimiter)\n for (fromm, to) in self.colspecs]\n\n\nclass FixedWidthFieldParser(PythonParser):\n \"\"\"\n Specialization that Converts fixed-width fields into DataFrames.\n See PythonParser for details.\n \"\"\"\n\n def __init__(self, f, **kwds):\n # Support iterators, convert to a list.\n self.colspecs = kwds.pop('colspecs')\n\n PythonParser.__init__(self, f, **kwds)\n\n def _make_reader(self, f):\n self.data = FixedWidthReader(f, self.colspecs, self.delimiter,\n self.comment)\n" ]
[ [ "pandas.core.common.is_list_like", "pandas.core.common.is_integer_dtype", "pandas.core.common.AbstractMethodError", "pandas.compat.range", "pandas.compat.map", "pandas.core.common._ensure_object", "pandas.compat.iteritems", "pandas.core.frame.DataFrame", "numpy.where", "numpy.roll", "pandas.parser.TextReader", "pandas.core.config.get_option", "pandas.lib.ismember", "pandas.compat.StringIO", "pandas.compat.text_type", "pandas.core.index.MultiIndex.from_tuples", "pandas.lib.isscalar", "pandas.lib.try_parse_dates", "numpy.zeros", "pandas.util.decorators.Appender", "numpy.putmask", "pandas.io.common.UnicodeReader", "pandas.core.index.MultiIndex.from_arrays", "pandas.lib.maybe_convert_numeric", "numpy.isnan", "pandas.lib.sanitize_objects", "pandas.io.date_converters.generic_parser", "pandas.core.common.is_integer", "pandas.lib.map_infer_mask", "pandas.io.common._validate_header_arg", "pandas.io.common._get_handle", "pandas.lib.map_infer", "pandas.lib.to_object_array", "pandas.io.common.UTF8Recoder", "pandas.lib.maybe_convert_bool", "pandas.compat.zip", "pandas.compat.lrange", "pandas.core.index.Index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [], "tensorflow": [] } ]
biddy1618/rasaOneTech
[ "7276746f859093543be6a65872b5459c8812598e" ]
[ "onetech/scriptsData/cleaning/cleanData.py" ]
[ "import os\nimport sys\n\nimport json\nimport pandas as pd\n\nimport psycopg2\n\nimport yaml\n\nimport argparse\n\nSOURCE_FILE_NLU_JSON = '/scriptsData/migration/dataNLU/nlu.json'\nSOURCE_FILE_NLU_MD = './scriptsData/migration/dataNLU/nlu.md'\nSOURCE_FILE_YAML = './scriptsData/migration/domain.yml'\nSOURCE_FILE_STORIES = './scriptsData/migration/stories.md'\nSOURCE_FILE_NLU_ERRORS = './errors.json'\n\nCLEAN_FILE_DOMAIN = './scriptsData/cleaning/cleanData/domain.yml'\nCLEAN_FILE_NLU = './scriptsData/cleaning/cleanData/nlu.md'\nCLEAN_FILE_STORIES = './scriptsData/cleaning/cleanData/stories.md'\n\nPATH_LOG = './scriptsData/cleaning/cleanData/logs.txt'\n\n\ndef cleanData(nluPath=SOURCE_FILE_NLU_MD, storiesPath=SOURCE_FILE_STORIES, \n domainPath=SOURCE_FILE_YAML, nluJsonFormat=False, nluErrorsPath = SOURCE_FILE_NLU_ERRORS):\n \n intentsData = []\n if nluJsonFormat:\n print('\\nReading nlu data in JSON format')\n with open(os.path.abspath(nluPath), 'r', encoding=\"utf-8\") as f:\n jsonData = json.loads(f.read())['rasa_nlu_data']\n\n \n for intent in jsonData['common_examples']:\n intentsData.append([\n intent['intent'], \n intent['text']\n ])\n else:\n print('\\nReading nlu data in MD format')\n with open(os.path.abspath(nluPath), 'r') as f:\n nluData = f.readlines()\n\n intentName = None\n for line in nluData:\n if line.strip()[:2] == '##':\n intentName = line.strip()[10:]\n elif line.strip()[:1] == '-':\n expression = line.strip()[2:]\n intentsData.append([intentName, expression])\n else:\n continue\n \n \n intents = pd.DataFrame(\n columns=['intent_name', 'expression_text'],\n data = intentsData\n )\n\n print('\\nReading domain data')\n with open(os.path.abspath(SOURCE_FILE_YAML), 'r', encoding=\"utf-8\") as f:\n ymlFile = yaml.safe_load(f)\n\n for intent in ymlFile['intents']:\n if intent not in intents['intent_name'].values:\n print(f'Found intent that is not in intents but in actions {intent}')\n \n print('\\nReading stories data')\n with open(os.path.abspath(storiesPath), 'r') as f:\n storiesData = f.readlines()\n\n stories = {}\n storyName = None\n story = []\n storyPair = []\n for line in storiesData:\n if line.strip()[:2] == '##':\n if len(story) == 0:\n storyName = line.strip()[3:]\n stories[storyName] = None\n continue\n stories[storyName] = story\n storyName = line.strip()[3:]\n stories[storyName] = None\n story = []\n elif line.strip()[:1] == '*':\n intent_name = line.strip()[2:]\n if intent_name not in intents['intent_name'].values:\n raise Exception(f'Intent {intent_name} \\\n found in stories.md but not in \\\n nlu.md and domain.yml')\n storyPair.append(intent_name)\n elif line.strip()[:1] == '-':\n action_name = line.strip()[2:]\n if action_name not in ymlFile['actions']:\n raise Exception(f'Action {action_name} \\\n found in stories.md but not in domain.yml')\n storyPair.append(action_name)\n story.append(tuple(storyPair))\n storyPair = []\n else:\n continue\n else:\n if storyName is not None:\n stories[storyName] = story\n \n with open(os.path.abspath(nluErrorsPath), 'r', encoding=\"utf-8\") as f:\n jsonData = json.loads(f.read())\n \n errors = set()\n for error in jsonData:\n if error['intent_prediction']['name'].strip() == '':\n continue\n errors.add(tuple(sorted([error['intent'], \\\n error['intent_prediction']['name']])))\n \n errors = sorted(list(errors))\n newIntents = {}\n\n for intent in intents['intent_name'].values:\n newIntents[intent] = intent\n\n '''\n Things to take into account:\n\n Stories in the form: [story1=[(intent, action), (intent, action), ...], \n 
story2=[(intent, action), (intent, action), ...]]\n THIS ONE MIGHT CAUSE SOME TROUBLE\n\n NLU training data in the form in DataFrame data structure:\n intent name1, expression1\n intent name1, expression2\n intent name1, expression3\n intent name2, expression1\n intent name2, expression2\n ...\n THIS ONE SHOULD BE EASY\n \n Domain training file:\n intents:\n list of intents\n THIS ONE SHOULD BE EASY\n '''\n\n mergedIntents = {}\n with open(os.path.abspath(PATH_LOG), 'w') as logs:\n for i, errorPair in enumerate(errors):\n intent1 = errorPair[0]\n intent2 = errorPair[1]\n \n exp1 = intents[intents['intent_name']==intent1]['expression_text'].values\n exp2 = intents[intents['intent_name']==intent2]['expression_text'].values\n \n print(f'Error number {i+1} out of {len(errors)}\\nMerge intents \"{intent1}\" and \"{intent2}\"')\n logs.write(f'Error number {i+1} out of {len(errors)}\\nMerge intents \"{intent1}\" and \"{intent2}\"\\n')\n for i in range(0, max(len(exp1), len(exp2))):\n e1 = exp1[i] if i < len(exp1) else ''\n e2 = exp2[i] if i < len(exp2) else ''\n print(f'{e1:70s} {e2}')\n logs.write(f'{e1:70s} {e2}\\n')\n \n print(('Please confirm if these intents should be merged (yes/no, default - yes)\\n'\n 'Default name will be used (first intent\\'s name)'))\n logs.write(('Please confirm if these intents should be merged (yes/no, default - yes)\\n'\n 'Default name will be used (first intent\\'s name)\\n'))\n \n answer = input()\n # answer = 'yes'\n\n if answer.lower().strip()=='yes' or answer.strip() == '':\n exists = False\n for k, v in mergedIntents.items():\n if intent1 in v or intent2 in v:\n v.add(intent1)\n v.add(intent2)\n exists = True\n if not exists:\n mergedIntents[intent1] = set([intent1, intent2])\n else:\n print(f'Not merging \"{intent1}\" and \"{intent2}\"\\n')\n logs.write(f'Not merging \"{intent1}\" and \"{intent2}\"\\n')\n continue\n \n for main, children in mergedIntents.items():\n listOfIntents = \"\".join([i+'\\n' for i in sorted(list(children))])\n\n print(f'Merging intents:\\n{listOfIntents}into intent {main}\\n')\n logs.write(f'Merging intents:\\n{listOfIntents}into intent {main}\\n\\n')\n \n \n for intent in children:\n \n # change NLU data\n intents.loc[intents['intent_name']==intent, 'intent_name']=main\n \n # change stories data\n for _, story in stories.items():\n \n for i, pair in enumerate(story):\n if pair[0] == intent:\n story[i] = (main, pair[1])\n \n \n \n ymlFile['intents'] = list(intents['intent_name'].unique())\n\n \n noalias_dumper = yaml.dumper.SafeDumper\n noalias_dumper.ignore_aliases = lambda self, data: True \n with open(os.path.abspath(CLEAN_FILE_DOMAIN), 'w') as f:\n yaml.dump(ymlFile, f, default_flow_style=False, \n allow_unicode=True, Dumper=noalias_dumper)\n \n\n with open(os.path.abspath(CLEAN_FILE_NLU), 'w') as f:\n for intent in intents['intent_name'].unique():\n exp = intents[intents['intent_name']==intent]['expression_text'].values\n f.write(f'## intent:{intent}\\n')\n for e in exp:\n f.write(f' - {e}\\n')\n f.write('\\n')\n\n with open(os.path.abspath(CLEAN_FILE_STORIES), 'w') as f:\n for storyName, story in stories.items():\n f.write(f'## {storyName}\\n')\n for pair in story:\n f.write(f'* {pair[0]}\\n')\n f.write(f' - {pair[1]}\\n')\n f.write('\\n')\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Fix the errors in NLU data')\n parser.add_argument('--json', action='store_true', help='set if NLU file is in JSON format (default md format)')\n parser.add_argument('--nlu', default=None, help='path to NLU 
file')\n parser.add_argument('--stories', default=SOURCE_FILE_STORIES, help='path to stories file')\n parser.add_argument('--domain', default=SOURCE_FILE_YAML, help='path to domain file')\n args = parser.parse_args()\n if args.json and args.nlu is None:\n args.nlu = SOURCE_FILE_NLU_JSON\n elif not args.json and args.nlu is None:\n args.nlu = SOURCE_FILE_NLU_MD\n cleanData(nluPath=args.nlu, storiesPath=args.stories, domainPath=args.domain, nluJsonFormat=args.json)" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lokhiufung/quick-and-dirty-dl
[ "f2e4429451543e9e9a44ed5304e268cf3c2aa888" ]
[ "cpc/model.py" ]
[ "import json\nfrom functools import namedtuple\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nfrom torch.utils.data import Dataset, DataLoader\nimport pytorch_lightning as pl\nfrom omegaconf import DictConfig\n\nfrom cpc.dataset import AudioRawDataset\n\n\nclass ConvNetEncoder(nn.Module):\n def __init__(self, hidden_size=512):\n super().__init__()\n self.hidden_size = hidden_size\n self.conv1 = nn.Conv1d(1, hidden_size, kernel_size=10, stride=5, padding=3)\n self.bnorm1 = nn.BatchNorm1d(hidden_size)\n self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=8, stride=4, padding=2)\n self.bnorm2 = nn.BatchNorm1d(hidden_size)\n self.conv3 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)\n self.bnorm3 = nn.BatchNorm1d(hidden_size)\n self.conv4 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)\n self.bnorm4 = nn.BatchNorm1d(hidden_size)\n self.conv5 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)\n self.bnorm5 = nn.BatchNorm1d(hidden_size)\n \n @property\n def input_port(self):\n return (\n ('audio_signal', ('B', 'C', 'T')),\n )\n @property\n def output_port(self):\n return (\n ('encoder_embedding', ('B', 'T', 'C')),\n )\n\n def forward(self, x):\n x = F.relu(self.bnorm1(self.conv1(x)))\n x = F.relu(self.bnorm2(self.conv2(x)))\n x = F.relu(self.bnorm3(self.conv3(x)))\n x = F.relu(self.bnorm4(self.conv4(x)))\n x = F.relu(self.bnorm5(self.conv5(x)))\n x = x.transpose(1, 2) # Reminder: make the channel last\n return x\n\n\nclass GRUAutoRegressiveModel(nn.Module):\n def __init__(self, embedding_size=512, hidden_size=256, keep_hidden=False):\n super().__init__()\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.keep_hidden = keep_hidden\n self.hidden = None\n\n self.rnn = nn.GRU(self.embedding_size, hidden_size=self.hidden_size, num_layers=1, batch_first=True)\n\n def input_port(self):\n return (\n ('encoder_embedding', ('B', 'T', 'C')),\n )\n\n def output_port(self):\n return (\n ('ar_embedding', ('B', 'T', 'C')),\n )\n\n def forward(self, x):\n x, h = self.rnn(x, self.hidden) # (batch, seq_len, hidden_size)\n if self.keep_hidden:\n self.hidden = h.detach()\n return x\n\n\nclass LinearPredictionModel(nn.Module):\n def __init__(self, ar_embedding_size=256, enc_embedding_size=512):\n super().__init__()\n self.ar_embedding_size = ar_embedding_size\n self.linear = nn.Linear(ar_embedding_size, enc_embedding_size, bias=False)\n\n def forward(self, x):\n x = self.linear(x)\n return x\n\n\nclass CPCCriterion(nn.Module):\n def __init__(self, ar_embedding_size=256, enc_embedding_size=512, n_predictions=12, n_negs=8):\n super().__init__()\n self.ar_embedding_size = ar_embedding_size\n self.enc_embedding_size = enc_embedding_size\n self.n_predictions = n_predictions # max number of steps for prediction horizon\n self.n_negs = n_negs # number of negative samples to be sampled\n self.loss_function = nn.CrossEntropyLoss() # reminder: mean reduction by defualt\n self.predictors = nn.ModuleList()\n for _ in range(self.n_predictions):\n self.predictors.append(\n LinearPredictionModel(self.ar_embedding_size, self.enc_embedding_size)\n )\n\n @property\n def input_port(self):\n return (\n ('encoder_embedding', ('B', 'T', 'C')),\n ('ar_embedding', ('B', 'T', 'C')),\n )\n\n @property\n def output_port(self):\n return (\n ('loss', ('N',)),\n ('acc', ('N',))\n )\n\n def get_random_samples(self, z_features, window_size):\n samples = []\n 
batch_size, steps, z_dim = z_features.size()\n\n # randomly sample n_negs * batch_size for each step\n z_neg = z_features.contiguous().view(-1, z_dim)\n sample_idx = torch.randint(low=0, high=batch_size*steps, size=(batch_size*self.n_negs*window_size,), device=z_features.device)\n z_neg = z_neg[sample_idx].view(batch_size, self.n_negs, window_size, z_dim)\n \n labels = torch.zeros(size=(batch_size*window_size,), dtype=torch.long, device=z_features.device)\n for k in range(1, self.n_predictions + 1):\n z_pos = z_features[:, k:k+window_size].unsqueeze(1) \n sample = torch.cat([z_pos, z_neg], dim=1)\n samples.append(sample)\n return samples, labels\n\n def forward(self, c_features, z_features, window_size):\n c_features = c_features[:, :window_size]\n samples, labels = self.get_random_samples(z_features, window_size)\n losses = []\n accs = []\n for k in range(self.n_predictions):\n z_pred = self.predictors[k](c_features)\n z_pred = z_pred.unsqueeze(1)\n prediction = (z_pred * samples[k]).sum(dim=3)\n\n prediction = prediction.permute(0, 2, 1)\n prediction = prediction.contiguous().view(-1, prediction.size(2))\n #####################################################\n # accuracy calculation, direct copy from: facebook/cpc_audio\n _, pred_index = prediction.max(1)\n acc = torch.sum(pred_index == labels).float()\n #####################################################\n loss = self.loss_function(prediction, labels)\n losses.append(loss.view(1, -1))\n accs.append(acc.view(1, -1))\n return torch.cat(losses, dim=1), torch.cat(accs, dim=1) / labels.size(0)\n \n\nclass CPCAudioRawModel(pl.LightningModule):\n def __init__(self, cfg: DictConfig):\n super().__init__()\n self.window_size = cfg.window_size // cfg.downsampling # number of steps in encoded space \n \n self.encoder = ConvNetEncoder(**cfg.encoder)\n self.ar = GRUAutoRegressiveModel(**cfg.ar)\n self.cpc_criterion = CPCCriterion(**cfg.cpc_criterion)\n\n self._train_dataset = None\n self._validation_dataset = None\n self._train_dataloader = None\n self._val_dataloader = None\n self._optimizers = None\n \n self.setup_train_dataloader(cfg.train_data)\n self.setup_val_dataloader(cfg.validation_data)\n self.setup_optimizers(cfg.optim)\n\n # this will save cfg as hyparams to ckpts and tensorboard\n self.hparams = cfg\n\n def setup_optimizers(self, optim_cfg: DictConfig):\n self._optimizers = Adam(self.parameters(), **optim_cfg)\n\n def setup_train_dataloader(self, train_data_cfg: DictConfig):\n self._train_dataset = AudioRawDataset(\n **train_data_cfg.dataset\n )\n self._train_dataloader = DataLoader(\n self._train_dataset,\n collate_fn=self._train_dataset.collate_fn,\n **train_data_cfg.dataloader\n )\n \n def setup_val_dataloader(self, validation_data_cfg: DictConfig):\n self._validation_dataset = AudioRawDataset(\n **validation_data_cfg.dataset\n )\n self._val_dataloader = DataLoader(\n self._validation_dataset,\n collate_fn=self._validation_dataset.collate_fn,\n **validation_data_cfg.dataloader\n )\n\n def forward(self, audio_signal):\n z_features = self.encoder(audio_signal)\n c_features = self.ar(z_features)\n return z_features, c_features\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n batch: audio_signals; tensor (B, C, L)\n \"\"\"\n z_features, c_features = self(batch)\n # c_features = c_features[:, :self.window_size]\n # random_samples = self.get_random_samples(z_features)\n losses, accs = self.cpc_criterion(c_features, z_features, self.window_size)\n total_loss = losses.sum(dim=1)\n aver_acc = accs.mean(dim=1) \n\n 
self.log('train_aver_acc', aver_acc, on_step=True, prog_bar=True, logger=True)\n self.log('train_loss', total_loss, on_step=True, prog_bar=True, logger=True)\n return total_loss\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n batch: audio_signals; tensor (B, C, L)\n \"\"\"\n z_features, c_features = self(batch)\n # c_features = c_features[:, :self.window_size]\n # random_samples = self.get_random_samples(z_features)\n losses, accs = self.cpc_criterion(c_features, z_features, self.window_size)\n total_loss = losses.sum(dim=1)\n aver_acc = accs.mean(dim=1) \n \n self.log('val_aver_acc', aver_acc, on_step=True, prog_bar=True, logger=True)\n self.log('val_loss', total_loss, on_step=True, prog_bar=True, logger=True)\n return total_loss\n\n def train_dataloader(self):\n if self._train_dataloader:\n return self._train_dataloader\n else:\n raise AttributeError('Please setup_train_dataloader() first')\n\n def val_dataloader(self):\n if self._val_dataloader:\n return self._val_dataloader\n else:\n raise AttributeError('Please setup_val_dataloader() first')\n \n def configure_optimizers(self):\n if self._optimizers:\n return self._optimizers\n else:\n raise AttributeError('Please setup_optimizers() first')\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.CrossEntropyLoss", "torch.randint", "torch.zeros", "torch.cat", "torch.nn.ModuleList", "torch.nn.GRU", "torch.utils.data.DataLoader", "torch.sum", "torch.nn.Linear", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
banr1jnts/kaggle-youtube2nd
[ "21248d563afcf707cc7665703a987d71c94f4c5a" ]
[ "model_utils.py" ]
[ "import tensorflow as tf\n\ndef sample_random_seq(model_input, num_frames, num_samples):\n batch_size = tf.shape(model_input)[0]\n frame_index_offset = tf.tile(tf.expand_dims(tf.range(num_samples), 0),\n [batch_size, 1])\n max_start_frame_index = tf.maximum(num_frames - num_samples, 0)\n start_frame_index = tf.cast(\n tf.multiply(tf.random_uniform([batch_size, 1]),\n tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32)\n frame_index = tf.minimum(start_frame_index + frame_index_offset,\n tf.cast(num_frames - 1, tf.int32))\n batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1),\n [1, num_samples])\n index = tf.stack([batch_index, frame_index], 2)\n return tf.gather_nd(model_input, index)\n\ndef sample_random_frames(model_input, num_frames, num_samples):\n batch_size = tf.shape(model_input)[0]\n frame_index = tf.cast(tf.multiply(tf.random_uniform([batch_size,\n num_samples]),\n tf.tile(tf.cast(num_frames, tf.float32),\n [1, num_samples])),\n tf.int32)\n batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1),\n [1, num_samples])\n index = tf.stack([batch_index, frame_index], 2)\n return tf.gather_nd(model_input, index)\n\ndef frame_pooling(frames, method, **unused_params):\n if method == \"average\":\n return tf.reduce_mean(frames, 1)\n elif method == \"max\":\n return tf.reduce_max(frames, 1)\n elif method == \"none\":\n feature_size = frames.shape_as_list()[2]\n return tf.reshape(frames, [-1, feature_size])\n else:\n raise ValueError(f\"Unrecognized pooling method: {method}\")\n" ]
[ [ "tensorflow.reduce_max", "tensorflow.gather_nd", "tensorflow.range", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.stack", "tensorflow.cast", "tensorflow.reshape", "tensorflow.random_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ddp5730/Non-binary-deep-transfer-learning-for-image-classification
[ "affb243b0939f82bf364ed9c4c203e203760082c" ]
[ "timm/models/efficientnet.py" ]
[ "\"\"\" PyTorch EfficientNet Family\n\nAn implementation of EfficienNet that covers variety of related models with efficient architectures:\n\n* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports)\n - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946\n - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971\n - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665\n - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252\n\n* MixNet (Small, Medium, and Large)\n - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595\n\n* MNasNet B1, A1 (SE), Small\n - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626\n\n* FBNet-C\n - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443\n\n* Single-Path NAS Pixel1\n - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877\n\n* And likely more...\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom typing import List\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\nfrom .efficientnet_blocks import round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT\nfrom .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights\nfrom .features import FeatureInfo, FeatureHooks\nfrom .helpers import build_model_with_cfg, default_cfg_for_features\nfrom .layers import create_conv2d, create_classifier\nfrom .registry import register_model\n\n__all__ = ['EfficientNet']\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'conv_stem', 'classifier': 'classifier',\n **kwargs\n }\n\n\ndefault_cfgs = {\n 'mnasnet_050': _cfg(url=''),\n 'mnasnet_075': _cfg(url=''),\n 'mnasnet_100': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'),\n 'mnasnet_140': _cfg(url=''),\n\n 'semnasnet_050': _cfg(url=''),\n 'semnasnet_075': _cfg(url=''),\n 'semnasnet_100': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'),\n 'semnasnet_140': _cfg(url=''),\n 'mnasnet_small': _cfg(url=''),\n\n 'mobilenetv2_100': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'),\n 'mobilenetv2_110d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'),\n 'mobilenetv2_120d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'),\n 'mobilenetv2_140': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'),\n\n 'fbnetc_100': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth',\n interpolation='bilinear'),\n 'spnasnet_100': _cfg(\n 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth',\n interpolation='bilinear'),\n\n 'efficientnet_b0': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'),\n 'efficientnet_b1': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',\n input_size=(3, 240, 240), pool_size=(8, 8)),\n 'efficientnet_b2': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',\n input_size=(3, 260, 260), pool_size=(9, 9)),\n 'efficientnet_b2a': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',\n input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0),\n 'efficientnet_b3': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n 'efficientnet_b3a': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',\n input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),\n 'efficientnet_b4': _cfg(\n url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),\n 'efficientnet_b5': _cfg(\n url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),\n 'efficientnet_b6': _cfg(\n url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),\n 'efficientnet_b7': _cfg(\n url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),\n 'efficientnet_b8': _cfg(\n url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),\n 'efficientnet_l2': _cfg(\n url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961),\n\n 'efficientnet_es': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'),\n 'efficientnet_em': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth',\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'efficientnet_el': _cfg(\n url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el.pth', \n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n\n 'efficientnet_es_pruned': _cfg(\n url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_es_pruned75.pth'),\n 'efficientnet_el_pruned': _cfg(\n url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el_pruned70.pth', \n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n\n 'efficientnet_cc_b0_4e': _cfg(url=''),\n 'efficientnet_cc_b0_8e': _cfg(url=''),\n 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n\n 'efficientnet_lite0': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'),\n 'efficientnet_lite1': _cfg(\n url='',\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'efficientnet_lite2': _cfg(\n url='',\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),\n 'efficientnet_lite3': _cfg(\n url='',\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n 'efficientnet_lite4': _cfg(\n url='', 
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),\n\n 'efficientnet_b1_pruned': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth',\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),\n 'efficientnet_b2_pruned': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth',\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),\n 'efficientnet_b3_pruned': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth',\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),\n\n 'tf_efficientnet_b0': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth',\n input_size=(3, 224, 224)),\n 'tf_efficientnet_b1': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth',\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'tf_efficientnet_b2': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth',\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),\n 'tf_efficientnet_b3': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth',\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n 'tf_efficientnet_b4': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth',\n input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),\n 'tf_efficientnet_b5': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth',\n input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),\n 'tf_efficientnet_b6': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth',\n input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),\n 'tf_efficientnet_b7': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth',\n input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),\n 'tf_efficientnet_b8': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth',\n input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),\n\n 'tf_efficientnet_b0_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)),\n 'tf_efficientnet_b1_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'tf_efficientnet_b2_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth',\n 
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),\n 'tf_efficientnet_b3_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n 'tf_efficientnet_b4_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),\n 'tf_efficientnet_b5_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),\n 'tf_efficientnet_b6_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),\n 'tf_efficientnet_b7_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),\n 'tf_efficientnet_b8_ap': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),\n\n 'tf_efficientnet_b0_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth',\n input_size=(3, 224, 224)),\n 'tf_efficientnet_b1_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth',\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'tf_efficientnet_b2_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth',\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),\n 'tf_efficientnet_b3_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth',\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n 'tf_efficientnet_b4_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth',\n input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),\n 'tf_efficientnet_b5_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth',\n input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),\n 'tf_efficientnet_b6_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth',\n input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),\n 'tf_efficientnet_b7_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth',\n input_size=(3, 600, 600), pool_size=(19, 19), 
crop_pct=0.949),\n 'tf_efficientnet_l2_ns_475': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth',\n input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936),\n 'tf_efficientnet_l2_ns': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth',\n input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96),\n\n 'tf_efficientnet_es': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 224, 224), ),\n 'tf_efficientnet_em': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n 'tf_efficientnet_el': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),\n\n 'tf_efficientnet_cc_b0_4e': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),\n 'tf_efficientnet_cc_b0_8e': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),\n 'tf_efficientnet_cc_b1_8e': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth',\n mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),\n\n 'tf_efficientnet_lite0': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res\n ),\n 'tf_efficientnet_lite1': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882,\n interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res\n ),\n 'tf_efficientnet_lite2': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890,\n interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res\n ),\n 'tf_efficientnet_lite3': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'),\n 'tf_efficientnet_lite4': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth',\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n input_size=(3, 380, 380), pool_size=(12, 
12), crop_pct=0.920, interpolation='bilinear'),\n\n 'mixnet_s': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'),\n 'mixnet_m': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'),\n 'mixnet_l': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'),\n 'mixnet_xl': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'),\n 'mixnet_xxl': _cfg(),\n\n 'tf_mixnet_s': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'),\n 'tf_mixnet_m': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'),\n 'tf_mixnet_l': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'),\n}\n\n_DEBUG = False\n\n\nclass EfficientNet(nn.Module):\n \"\"\" (Generic) EfficientNet\n\n A flexible and performant PyTorch implementation of efficient network architectures, including:\n * EfficientNet B0-B8, L2\n * EfficientNet-EdgeTPU\n * EfficientNet-CondConv\n * MixNet S, M, L, XL\n * MnasNet A1, B1, and small\n * FBNet C\n * Single-Path NAS Pixel1\n\n \"\"\"\n\n def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32,\n channel_multiplier=1.0, channel_divisor=8, channel_min=None,\n output_stride=32, pad_type='', fix_stem=False, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0.,\n se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg'):\n super(EfficientNet, self).__init__()\n norm_kwargs = norm_kwargs or {}\n\n self.num_classes = num_classes\n self.num_features = num_features\n self.drop_rate = drop_rate\n\n # Stem\n if not fix_stem:\n stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)\n self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)\n self.bn1 = norm_layer(stem_size, **norm_kwargs)\n self.act1 = act_layer(inplace=True)\n\n # Middle stages (IR/ER/DS Blocks)\n builder = EfficientNetBuilder(\n channel_multiplier, channel_divisor, channel_min, output_stride, pad_type, act_layer, se_kwargs,\n norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG)\n self.blocks = nn.Sequential(*builder(stem_size, block_args))\n self.feature_info = builder.features\n head_chs = builder.in_chs\n\n # Head + Pooling\n self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type)\n self.bn2 = norm_layer(self.num_features, **norm_kwargs)\n self.act2 = act_layer(inplace=True)\n self.global_pool, self.classifier = create_classifier(\n self.num_features, self.num_classes, pool_type=global_pool)\n\n efficientnet_init_weights(self)\n\n def as_sequential(self):\n layers = [self.conv_stem, self.bn1, self.act1]\n layers.extend(self.blocks)\n layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool])\n layers.extend([nn.Dropout(self.drop_rate), self.classifier])\n return nn.Sequential(*layers)\n\n def get_classifier(self):\n return self.classifier\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.num_classes = num_classes\n self.global_pool, self.classifier = create_classifier(\n self.num_features, self.num_classes, pool_type=global_pool)\n\n def forward_features(self, x):\n x = self.conv_stem(x)\n x = self.bn1(x)\n x = 
self.act1(x)\n x = self.blocks(x)\n x = self.conv_head(x)\n x = self.bn2(x)\n x = self.act2(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.global_pool(x)\n if self.drop_rate > 0.:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n return self.classifier(x)\n\n\nclass EfficientNetFeatures(nn.Module):\n \"\"\" EfficientNet Feature Extractor\n\n A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation\n and object detection models.\n \"\"\"\n\n def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck',\n in_chans=3, stem_size=32, channel_multiplier=1.0, channel_divisor=8, channel_min=None,\n output_stride=32, pad_type='', fix_stem=False, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0.,\n se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None):\n super(EfficientNetFeatures, self).__init__()\n norm_kwargs = norm_kwargs or {}\n self.drop_rate = drop_rate\n\n # Stem\n if not fix_stem:\n stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)\n self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)\n self.bn1 = norm_layer(stem_size, **norm_kwargs)\n self.act1 = act_layer(inplace=True)\n\n # Middle stages (IR/ER/DS Blocks)\n builder = EfficientNetBuilder(\n channel_multiplier, channel_divisor, channel_min, output_stride, pad_type, act_layer, se_kwargs,\n norm_layer, norm_kwargs, drop_path_rate, feature_location=feature_location, verbose=_DEBUG)\n self.blocks = nn.Sequential(*builder(stem_size, block_args))\n self.feature_info = FeatureInfo(builder.features, out_indices)\n self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices}\n\n efficientnet_init_weights(self)\n\n # Register feature extraction hooks with FeatureHooks helper\n self.feature_hooks = None\n if feature_location != 'bottleneck':\n hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))\n self.feature_hooks = FeatureHooks(hooks, self.named_modules())\n\n def forward(self, x) -> List[torch.Tensor]:\n x = self.conv_stem(x)\n x = self.bn1(x)\n x = self.act1(x)\n if self.feature_hooks is None:\n features = []\n if 0 in self._stage_out_idx:\n features.append(x) # add stem out\n for i, b in enumerate(self.blocks):\n x = b(x)\n if i + 1 in self._stage_out_idx:\n features.append(x)\n return features\n else:\n self.blocks(x)\n out = self.feature_hooks.get_output(x.device)\n return list(out.values())\n\n\ndef _create_effnet(variant, pretrained=False, **kwargs):\n features_only = False\n model_cls = EfficientNet\n kwargs_filter = None\n if kwargs.pop('features_only', False):\n features_only = True\n kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool')\n model_cls = EfficientNetFeatures\n model = build_model_with_cfg(\n model_cls, variant, pretrained,\n default_cfg=default_cfgs[variant],\n pretrained_strict=not features_only,\n kwargs_filter=kwargs_filter,\n **kwargs)\n if features_only:\n model.default_cfg = default_cfg_for_features(model.default_cfg)\n return model\n\n\ndef _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-a1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16_noskip'],\n # stage 1, 
112x112 in\n ['ir_r2_k3_s2_e6_c24'],\n # stage 2, 56x56 in\n ['ir_r3_k5_s2_e3_c40_se0.25'],\n # stage 3, 28x28 in\n ['ir_r4_k3_s2_e6_c80'],\n # stage 4, 14x14in\n ['ir_r2_k3_s1_e6_c112_se0.25'],\n # stage 5, 14x14in\n ['ir_r3_k5_s2_e6_c160_se0.25'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-b1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_c16_noskip'],\n # stage 1, 112x112 in\n ['ir_r3_k3_s2_e3_c24'],\n # stage 2, 56x56 in\n ['ir_r3_k5_s2_e3_c40'],\n # stage 3, 28x28 in\n ['ir_r3_k5_s2_e6_c80'],\n # stage 4, 14x14in\n ['ir_r2_k3_s1_e6_c96'],\n # stage 5, 14x14in\n ['ir_r4_k5_s2_e6_c192'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320_noskip']\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-b1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_c8'],\n ['ir_r1_k3_s2_e3_c16'],\n ['ir_r2_k3_s2_e6_c16'],\n ['ir_r4_k5_s2_e6_c32_se0.25'],\n ['ir_r3_k3_s1_e6_c32_se0.25'],\n ['ir_r3_k5_s2_e6_c88_se0.25'],\n ['ir_r1_k3_s1_e6_c144']\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=8,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_mobilenet_v2(\n variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):\n \"\"\" Generate MobileNet-V2 network\n Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py\n Paper: https://arxiv.org/abs/1801.04381\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_c16'],\n ['ir_r2_k3_s2_e6_c24'],\n ['ir_r3_k3_s2_e6_c32'],\n ['ir_r4_k3_s2_e6_c64'],\n ['ir_r3_k3_s1_e6_c96'],\n ['ir_r3_k3_s2_e6_c160'],\n ['ir_r1_k3_s1_e6_c320'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),\n num_features=1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n fix_stem=fix_stem_head,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n act_layer=resolve_act_layer(kwargs, 'relu6'),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\" FBNet-C\n\n Paper: https://arxiv.org/abs/1812.03443\n Ref Impl: 
https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py\n\n NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper,\n it was used to confirm some building block details\n \"\"\"\n arch_def = [\n ['ir_r1_k3_s1_e1_c16'],\n ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],\n ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],\n ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],\n ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],\n ['ir_r4_k5_s2_e6_c184'],\n ['ir_r1_k3_s1_e6_c352'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=16,\n num_features=1984, # paper suggests this, but is not 100% clear\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates the Single-Path NAS model from search targeted for Pixel1 phone.\n\n Paper: https://arxiv.org/abs/1904.02877\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_c16_noskip'],\n # stage 1, 112x112 in\n ['ir_r3_k3_s2_e3_c24'],\n # stage 2, 56x56 in\n ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],\n # stage 3, 28x28 in\n ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],\n # stage 4, 14x14in\n ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],\n # stage 5, 14x14in\n ['ir_r4_k5_s2_e6_c192'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320_noskip']\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates an EfficientNet model.\n\n Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py\n Paper: https://arxiv.org/abs/1905.11946\n\n EfficientNet params\n name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)\n 'efficientnet-b0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-b1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-b2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-b3': (1.2, 1.4, 300, 0.3),\n 'efficientnet-b4': (1.4, 1.8, 380, 0.4),\n 'efficientnet-b5': (1.6, 2.2, 456, 0.4),\n 'efficientnet-b6': (1.8, 2.6, 528, 0.5),\n 'efficientnet-b7': (2.0, 3.1, 600, 0.5),\n 'efficientnet-b8': (2.2, 3.6, 672, 0.5),\n 'efficientnet-l2': (4.3, 5.3, 800, 0.5),\n\n Args:\n channel_multiplier: multiplier to number of channels per layer\n depth_multiplier: multiplier to number of repeats per stage\n\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16_se0.25'],\n ['ir_r2_k3_s2_e6_c24_se0.25'],\n ['ir_r2_k5_s2_e6_c40_se0.25'],\n ['ir_r3_k3_s2_e6_c80_se0.25'],\n ['ir_r3_k5_s1_e6_c112_se0.25'],\n ['ir_r4_k5_s2_e6_c192_se0.25'],\n ['ir_r1_k3_s1_e6_c320_se0.25'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'swish'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return 
model\n\n\ndef _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\" Creates an EfficientNet-EdgeTPU model\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu\n \"\"\"\n\n arch_def = [\n # NOTE `fc` is present to override a mismatch between stem channels and in chs not\n # present in other models\n ['er_r1_k3_s1_e4_c24_fc24_noskip'],\n ['er_r2_k3_s2_e8_c32'],\n ['er_r4_k3_s2_e8_c48'],\n ['ir_r5_k5_s2_e8_c96'],\n ['ir_r4_k5_s1_e8_c144'],\n ['ir_r2_k5_s2_e8_c192'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n act_layer=resolve_act_layer(kwargs, 'relu'),\n **kwargs,\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_efficientnet_condconv(\n variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):\n \"\"\"Creates an EfficientNet-CondConv model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16_se0.25'],\n ['ir_r2_k3_s2_e6_c24_se0.25'],\n ['ir_r2_k5_s2_e6_c40_se0.25'],\n ['ir_r3_k3_s2_e6_c80_se0.25'],\n ['ir_r3_k5_s1_e6_c112_se0.25_cc4'],\n ['ir_r4_k5_s2_e6_c192_se0.25_cc4'],\n ['ir_r1_k3_s1_e6_c320_se0.25_cc4'],\n ]\n # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and\n # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n act_layer=resolve_act_layer(kwargs, 'swish'),\n **kwargs,\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates an EfficientNet-Lite model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite\n Paper: https://arxiv.org/abs/1905.11946\n\n EfficientNet params\n name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)\n 'efficientnet-lite0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-lite1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-lite2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-lite3': (1.2, 1.4, 280, 0.3),\n 'efficientnet-lite4': (1.4, 1.8, 300, 0.3),\n\n Args:\n channel_multiplier: multiplier to number of channels per layer\n depth_multiplier: multiplier to number of repeats per stage\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16'],\n ['ir_r2_k3_s2_e6_c24'],\n ['ir_r2_k5_s2_e6_c40'],\n ['ir_r3_k3_s2_e6_c80'],\n ['ir_r3_k5_s1_e6_c112'],\n ['ir_r4_k5_s2_e6_c192'],\n ['ir_r1_k3_s1_e6_c320'],\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),\n num_features=1280,\n stem_size=32,\n fix_stem=True,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu6'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_mixnet_s(variant, 
channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet\n Paper: https://arxiv.org/abs/1907.09595\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n num_features=1536,\n stem_size=16,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\ndef _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium-Large model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet\n Paper: https://arxiv.org/abs/1907.09595\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c24'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),\n num_features=1536,\n stem_size=24,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_effnet(variant, pretrained, **model_kwargs)\n return model\n\n\n@register_model\ndef mnasnet_050(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 0.5. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mnasnet_075(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 0.75. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mnasnet_100(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.0. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mnasnet_b1(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.0. 
\"\"\"\n return mnasnet_100(pretrained, **kwargs)\n\n\n@register_model\ndef mnasnet_140(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.4 \"\"\"\n model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef semnasnet_050(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 0.5 \"\"\"\n model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef semnasnet_075(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 0.75. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef semnasnet_100(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.0. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mnasnet_a1(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.0. \"\"\"\n return semnasnet_100(pretrained, **kwargs)\n\n\n@register_model\ndef semnasnet_140(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.4. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mnasnet_small(pretrained=False, **kwargs):\n \"\"\" MNASNet Small, depth multiplier of 1.0. \"\"\"\n model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mobilenetv2_100(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.0 channel multiplier \"\"\"\n model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mobilenetv2_140(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.4 channel multiplier \"\"\"\n model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mobilenetv2_110d(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers\"\"\"\n model = _gen_mobilenet_v2(\n 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mobilenetv2_120d(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers \"\"\"\n model = _gen_mobilenet_v2(\n 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef fbnetc_100(pretrained=False, **kwargs):\n \"\"\" FBNet-C \"\"\"\n if pretrained:\n # pretrained model trained with non-default BN epsilon\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef spnasnet_100(pretrained=False, **kwargs):\n \"\"\" Single-Path NAS Pixel1\"\"\"\n model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 \"\"\"\n # NOTE for train, drop_rate should be 0.2, 
drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b2a(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 @ 288x288 w/ 1.0 test crop\"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b2a', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b3a(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b3a', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 \"\"\"\n # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b5(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 \"\"\"\n # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b6(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 \"\"\"\n # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b7(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 \"\"\"\n # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b8(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8 \"\"\"\n # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_l2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2.\"\"\"\n # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2\n model = _gen_efficientnet(\n 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_es(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge Small. 
\"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n@register_model\ndef efficientnet_es_pruned(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0\"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n@register_model\ndef efficientnet_em(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Medium. \"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_el(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Large. \"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n@register_model\ndef efficientnet_el_pruned(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0\"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n@register_model\ndef efficientnet_cc_b0_4e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_cc_b0_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_cc_b1_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B1 w/ 8 Experts \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_lite0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite0 \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet_lite(\n 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_lite1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite1 \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n model = _gen_efficientnet_lite(\n 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_lite2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite2 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet_lite(\n 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return 
model\n\n\n@register_model\ndef efficientnet_lite3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite3 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n model = _gen_efficientnet_lite(\n 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_lite4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite4 \"\"\"\n # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2\n model = _gen_efficientnet_lite(\n 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b1_pruned(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n variant = 'efficientnet_b1_pruned'\n model = _gen_efficientnet(\n variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b2_pruned(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef efficientnet_b3_pruned(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4. 
Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b5(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b6(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b7(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b8(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b0_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 AdvProp. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b1_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 AdvProp. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b2_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 AdvProp. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b3_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 AdvProp. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b4_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 AdvProp. 
Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b5_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 AdvProp. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b6_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 AdvProp. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b7_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 AdvProp. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b8_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8 AdvProp. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b0_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 NoisyStudent. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b1_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 NoisyStudent. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b2_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 NoisyStudent. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b3_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 NoisyStudent. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b4_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 NoisyStudent. 
Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b5_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 NoisyStudent. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b6_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 NoisyStudent. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_b7_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 NoisyStudent. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_l2_ns_475(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_l2_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2 NoisyStudent. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_es(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge Small. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_em(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Medium. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_el(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Large. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 4 Experts. 
Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_lite0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite0 \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_lite1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite1 \"\"\"\n # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_lite2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite2 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_lite3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite3 \"\"\"\n # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_efficientnet_lite4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite4 \"\"\"\n # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mixnet_s(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model.\n \"\"\"\n model = _gen_mixnet_s(\n 'mixnet_s', channel_multiplier=1.0, 
pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mixnet_m(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium model.\n \"\"\"\n model = _gen_mixnet_m(\n 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mixnet_l(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Large model.\n \"\"\"\n model = _gen_mixnet_m(\n 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mixnet_xl(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Extra-Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n \"\"\"\n model = _gen_mixnet_m(\n 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef mixnet_xxl(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Double Extra Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n \"\"\"\n model = _gen_mixnet_m(\n 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_mixnet_s(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_s(\n 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_mixnet_m(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_m(\n 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\n@register_model\ndef tf_mixnet_l(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Large model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_m(\n 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n" ]
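The registered constructors above (efficientnet_b0 through tf_mixnet_l) all follow the same pattern: set any TF-compatibility kwargs, call a _gen_* generator that decodes an arch_def into block_args, and build the network through _create_effnet. A minimal usage sketch follows, assuming the module shown above is importable; the import name "efficientnet", the 224x224 input size, and the batch size are illustrative assumptions, not part of the original source. With pretrained=False no checkpoint download is attempted.

import torch
from efficientnet import efficientnet_b0  # assumed import path for the module above

# Build the B0 variant without pretrained weights; the NOTE comments above suggest
# drop_rate=0.2 / drop_path_rate=0.2 when training this variant.
model = efficientnet_b0(pretrained=False, drop_rate=0.2, drop_path_rate=0.2)
model.eval()

x = torch.randn(1, 3, 224, 224)         # 3-channel input at the B0 training resolution
with torch.no_grad():
    logits = model(x)                    # (1, 1000) class logits
    feats = model.forward_features(x)    # pre-pooling feature map (conv_head -> bn2 -> act2)

print(logits.shape, feats.shape)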
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.functional.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FernandoGaGu/pywinEA
[ "7dc2e62668851fcf6b4d88d6696c69a2d4a570ec", "7dc2e62668851fcf6b4d88d6696c69a2d4a570ec" ]
[ "pywinEA/imputation/dynamic.py", "pywinEA/algorithm/nsga2.py" ]
[ "# Module that defines the strategies defined for the handling of missing values.\n#\n# Author: Fernando García <[email protected]>\n#\n# External dependencies\nimport numpy as np\n# Module dependencies\nfrom ..interface.imputation import ImputationStrategy\n\n\nclass DynamicValue(ImputationStrategy):\n \"\"\"\n Imputation technique that replaces missing values dynamically using a backward/forward fill with randomization.\n \"\"\"\n def __init__(self, seed=None):\n \"\"\"\n __init__(self, seed=None)\n \"\"\"\n if seed is not None and isinstance(seed, int):\n np.random.seed(seed)\n\n def __repr__(self):\n return \"DynamicValue\"\n\n def __str__(self):\n return self.__repr__()\n\n def impute(self, data: np.ndarray, y: np.ndarray):\n \"\"\"\n Function that receives a dataset with missing values and replaces the values filling first in a forward\n way, that is, replacing missing values with the previous known value. Additionally, it may be the case\n in which a missing value is in the first row, therefore, after filling it using a forward strategy, it\n applies a backward filling to avoid the possible presence of missing values. Each time the impute()\n method is called the rows are shuffled randomly. The dataset is returned in the same order in which\n it is received.\n\n Parameters\n -------------\n :param data: 2d-array\n Predictor variables with missing values\n :param y: 1d-array\n Class labels\n\n Returns\n ----------\n :return: 2d-array\n Dataset without imputation values.\n \"\"\"\n # Create an index\n idx = [n for n in range(data.shape[0])]\n\n # Append index as column\n data = np.hstack((data, np.array(idx).reshape(len(idx), 1)))\n\n # Handle imputation values using forward/backward fill with randomization\n np.random.shuffle(data)\n\n # Forward fill\n data = DynamicValue._forward_fill(data)\n\n # Backward fill (When the inverted dataset is provided, It's like performing a backward filling)\n data = DynamicValue._forward_fill(data[::-1])\n\n # Sort values by index\n data = data[np.argsort(data[:,data.shape[1]-1])]\n\n # Return data excluding the index\n return data[:, :data.shape[1]-1]\n\n @staticmethod\n def _forward_fill(data: np.ndarray):\n \"\"\"\n Function that replaces missing values with the value of the previous instance.\n\n Parameters\n -------------\n :param data: 2d-array\n Dataset with missing values.\n\n Returns\n -----------\n :return: 2d-array\n Dataset filling using a forward strategy\n \"\"\"\n last_values = None\n\n for row in data:\n if last_values is not None:\n # Get NaN values index\n idx = np.isnan(row)\n # Fill NaN values using last seen values\n row[idx] = last_values[idx]\n\n # Update last seen values\n last_values = row\n\n return data\n", "# Module that contains all the genetic algorithms implemented in the pywin module.\n#\n# Author: Fernando García <[email protected]>\n#\n# External dependencies\nimport numpy as np\nfrom math import inf as infinite\nfrom functools import total_ordering\nfrom tqdm import tqdm\n\n#  Module dependencies\nfrom ..interface.algorithm import MOAbase\nfrom ..population.population import Population\nfrom ..utils.hypervolume import hypervolume\nfrom ..utils.fast_non_dominated_sort import fast_non_dominated_sort\n\n\ndef calculate_crowding(solutions: list):\n \"\"\"\n Method to calculate crowding for all solutions using _crowding_distance() method for each solution.\n\n Parameters\n -----------\n :param solutions: list\n List with all solutions.\n \"\"\"\n\n def crowding_distance(all_solutions: list, idx: int, measure_idx: 
int):\n \"\"\"\n Function that calculates the crowding distance (cuboid) for a certain solution.\n\n Parameters\n ------------\n :param all_solutions: list\n All solutions.\n :param idx: int\n Index indicating the objective solution for which the crowding will be calculated.\n :param measure_idx: int\n Indicates the index at which the scores of a certain objective function are found.\n \"\"\"\n # Get target function values\n measured_values = [solution.values[measure_idx] for solution in all_solutions]\n f_max = max(measured_values)\n f_min = min(measured_values)\n\n # If all the solutions are the same crowding is 0\n if f_max == f_min:\n return 0\n\n # Calculate crowding distance\n distance = (measured_values[idx + 1] - measured_values[idx - 1]) / \\\n (max(measured_values) - min(measured_values))\n\n return distance\n\n # Get the number of target functions\n num_objectives = len(solutions[0].values)\n\n for measure in range(num_objectives):\n\n # Sort solutions based on measure value (ascending)\n solutions = sorted(solutions, key=lambda solution: solution.values[measure])\n\n # Select limits to infinite\n solutions[0].crowding_distance, solutions[len(solutions) - 1].crowding_distance = infinite, infinite\n\n # Calculate crowding distance for target function\n for i in range(1, len(solutions) - 1):\n solutions[i].crowding_distance += crowding_distance(all_solutions=solutions,\n idx=i, measure_idx=measure)\n\n\ndef restart_solutions(solutions):\n \"\"\"\n Method that resets the values of each solution.\n\n Parameters\n -----------\n :param solutions: list\n \"\"\"\n for solution in solutions:\n solution.restart()\n\n\n@total_ordering\nclass Solution:\n \"\"\"\n Class that represents a possible solution with the values of each of the objective functions.\n \"\"\"\n\n def __init__(self, values: list):\n \"\"\"\n\n __init__(values)\n\n Notes\n -------\n - values: 1d-array -> List of target function values.\n - dominated_set: list(Solution) -> Solutions dominated by the current solution.\n - np: int -> Number of times this solution is dominated.\n - rank: int -> Indicates which front the current solution is on.\n - crowding_distance: float -> Crowding distance.\n \"\"\"\n self.values = values\n self.dominated_set = []\n self.np = 0\n self.rank = None\n self.crowding_distance = 0\n\n def __str__(self):\n return f\"Solution(values={self.values} rank={self.rank} crowding={self.crowding_distance})\"\n\n def __repr__(self):\n return self.__str__()\n\n def restart(self):\n \"\"\"\n Reset all values in the solution.\n \"\"\"\n self.dominated_set = []\n self.np = 0\n self.rank = None\n self.crowding_distance = 0\n\n def _crowded_comparision(self, other):\n \"\"\"\n Comparison operator between two solutions. Based on:\n\n K. Deb, A. Pratap, S. Agarwal and T. Meyarivan, \"A fast and elitist multiobjective genetic algorithm:\n NSGA-II,\" in IEEE Transactions on Evolutionary Computation, vol. 6, no. 2, pp. 
182-197, April 2002.\n\n Crowded comparision operator:\n If we have two solutions with a different Pareto ranking, we choose the one with the lowest value.\n If they have the same ranking we take the one with the highest crowding (the least covered solution).\n \"\"\"\n # Current solution dominates\n if (self.rank < other.rank) or ((self.rank == other.rank) and\n (self.crowding_distance > other.crowding_distance)):\n return 1\n # Both solutions are equal\n elif (self.rank == other.rank) and (self.crowding_distance == other.crowding_distance):\n return 0\n\n return -1\n\n def __eq__(self, other):\n \"\"\"\n Operator ==\n \"\"\"\n if self._crowded_comparision(other) == 0:\n return True\n\n return False\n\n def __lt__(self, other):\n \"\"\"\n Operator <\n \"\"\"\n if self._crowded_comparision(other) == -1:\n return True\n\n return False\n\n\nclass NSGA2(MOAbase):\n \"\"\"\n Implementation of the multi-objective NSGAII algorithm based on:\n\n K. Deb, A. Pratap, S. Agarwal and T. Meyarivan, \"A fast and elitist multiobjective genetic algorithm:\n NSGA-II,\" in IEEE Transactions on Evolutionary Computation, vol. 6, no. 2, pp. 182-197, April 2002.\n\n As many target functions as desired can be added from the fitness sub-module. You can also treat the number\n of features as an objective function (to be minimized) by indicating the parameter optimize_features as true.\n To evaluate the evolution of the algorithm, the hypervolume indicator is used. This metric has been implemented\n using the inclusion-exclusion algorithm. It must be taken into account that the larger the population or the\n more objectives have to be maximized, the higher the computational cost will be.\n\n\n Parameters\n ------------\n fitness: pywin.interface.FitnessStrategy / pywin.fitness / list\n If the parameter optimize_features is True, a unique fitness function of the fitness submodule can be provided\n (also a list with several functions is allowed). If this parameter is selected as False it is necessary to\n provide a list with at least two functions to optimize.\n\n optimize_features: <optional> bool\n If this parameter is true, the number of targets will be a function to optimize.\n\n features_function: <optional> function\n User-defined function that should receive a value called \"individual\" and another value \"tot_feats\". This f\n unction should return a single value that will be maximized. By default this function will be:\n\n f(individual) = 1 - (len(individual.features) / len(all features))\n\n Important note: Functions cannot be defined as lambda functions as they are not serializable by the pickle\n library and an error will be thrown.\n\n population: <optional> pywin.population.Population\n If a population is not provided it is necessary to specify the population size using argument\n population_size.\n\n population_size: <optionally> int\n Only necessary if a population is not provided. By default the algorithm will create a basic\n population (pywin.population.Population).\n\n selection: <optional> pywin.selection.interface.SelectionStrategy\n Individual selection strategy. By default pywin.selection.TournamentSelection (with two gladiators,\n a single winner and sampling without replacement).\n\n crossover: <optional> pywin.operators.interface.CrossOverStrategy\n Individual cross-over strategy. By default pywin.operators.OnePoint.\n\n mutation: <optional> pywin.operators.interface.MutationStrategy\n Mutation strategy used to introduce changes in the population. 
With some strategies it may be necessary\n to indicate the mutation_rate parameter.\n\n mutation_rate <optional> float\n A parameter that indicates the probability of a random change occurring in a given individual. If this\n argument is provided without a mutation argument by default pywin.operators.RandomMutation will be used.\n\n imputer: <optional> pywin.imputation.interface.ImputationStrategy\n Strategy to handle missing values. The missing values will be imputed for each individual in the\n population using the individual features.\n\n generations: int\n Number of generations (the minimum number of generations is 1).\n\n positive_class: <optional> int\n Class that will be considered as positive class, the rest of the classes will be considered as negative\n classes.\n If a value is provided the class labels will be transformed -> Binary Classification.\n Otherwise they will not be modified -> Multiclass classification.\n\n random_state : <optional> int\n Random seed.\n\n id : <optional> str\n Algorithm identifier\n\n Attributes\n ------------\n best_features: (Getter) Return a list with the best features in each solution.\n\n get_current_generation: (Getter) Return the current generation of the algorithm.\n\n population: (Getter) Return the current population of the algorithm.\n\n population_fitness: (Getter) Return the fitness of the current generation of the algorithm (This consists of\n instances of Solution).\n\n best_performance: (Getter) Return a dict with the score and the best performance achieved for each solution.\n\n fitness: (Getter / Setter) Algorithm fitness strategy.\n\n features_function: (Getter / Setter) Function to evaluate the number of features.\n\n generations: (Getter / Setter) Algorithm number of generations.\n\n selection: (Getter / Setter) Algorithm selection strategy.\n\n mutation_rate: (Getter / Setter) Algorithm mutation_rate.\n\n imputer: (Getter / Setter) Algorithm imputation strategy.\n\n crossover: (Getter / Setter) Algorithm crossover strategy.\n\n positive_class: (Getter / Setter) Algorithm selection strategy.\n\n random_state: (Getter / Setter) Algorithm selection strategy.\n\n id: (Getter / Setter) Algorithm selection strategy.\n\n\n Methods\n ---------\n set_features(features): Allows you to assign the labels of the columns (the names of the predictor variables).\n If this function is not used before using the fit method, the corresponding numerical value will be assigned\n to the position of the predictor variable. 
It also can be used after training step.\n\n fit(X, y): Start algorithm execution.\n\n continue_training(generations): Continue training the algorithm for an extra number of generations.\n\n predict(X): Method NOT available for NSGA2.\n\n training_evolution(): Returns the parameters monitored during training.It returns a dictionary with the\n following scheme:\n :key hypervolume:\n Hypervolume indicator in each generation.\n :key num_solutions_front:\n Number of solutions on the Pareto front in each generation.\n :key best_values:\n Best values of each function in each generation.\n\n get_dataset(): Function that returns all the dataset and class labels.\n\n set_population(new_individuals): Method that allows to select a new population of solutions.\n\n save(file_name, dir_name, overwrite): This function allows to save the model into a file.\n\n load_model(file_name, dir_name): <class method> This function allows to load a model from a file.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n __init__(**kwargs)\n\n Notes\n ------\n Use help(NSGA2) to view the required parameters.\n \"\"\"\n # Call superclass\n super().__init__(**kwargs)\n\n def __repr__(self):\n return f\"NSGAII(population_size={self._population.size} generations={self.generations} \" \\\n f\"mutation={self.mutation} mutation_rate={self.mutation_rate} selection={self.selection} \" \\\n f\"fitness={self.fitness} optimize_features={self.optimize_features} crossover={self.crossover} \" \\\n f\"imputer={self.imputer} positive_class={self.positive_class} random_rate={self.random_state}\"\n\n def __str__(self):\n return self.__repr__()\n\n def get_dataset(self):\n \"\"\"\n Function that returns all the dataset and class labels.\n\n Returns\n ---------\n :return: 2d-array\n Predictor variables.\n :return 1d-array\n Class labels.\n \"\"\"\n return self._X, self._y\n\n def fit(self, X: np.ndarray, y: np.ndarray):\n \"\"\"\n Function that begins the execution of the algorithm. First, it processes the class labels to adapt\n them to a mono-class classification problem (only if positive_class has been provided). 
Then it initializes\n a random population and calls the _core() method where the main logic of the algorithm is defined.\n\n fit(X: np.ndarray, y: np.ndarray)\n\n Parameters\n ------------\n :param X: 2d-array\n Values of the target variables.\n :param y: 1d-array\n Labels of each condition.\n\n Returns\n ----------\n :return: pywin.algorithms.NSGA2\n Model fitted using the best feature combination.\n \"\"\"\n # Transformation of class labels\n self._y = self._label_processing(y.copy()) if self.positive_class is not None else y.copy()\n\n # Save data and current generation\n self._X = X\n self._current_generation = self.generations\n\n # If no column names have been provided, numbers are assigned by default\n if self._population.features is None:\n self._population.init_features([n for n in range(self._X.shape[1])])\n\n # Initializes population\n self._population.init()\n\n # Genetic algorithm core\n self._core(generation_start=0, generation_end=self.generations)\n\n return self\n\n def continue_training(self, generations: int):\n \"\"\"\n This function allows to continue training the algorithm since the last generation.\n\n Parameters\n -------------\n :param generations: int\n Extra number of generations.\n\n Returns\n -----------\n :return: NSGA2\n \"\"\"\n if not isinstance(generations, int):\n raise TypeError(\"Parameter generations must be an integer.\")\n\n # Genetic algorithm core\n self._core(generation_start=self._current_generation, generation_end=(self._current_generation + generations))\n\n # Save last generation\n self._current_generation = self.generations + generations\n self.generations += generations\n\n return self\n\n def training_evolution(self):\n \"\"\"\n Function that returns a dictionary with the data collected throughout the algorithm search.\n You can use the function plot_evolution() from pywin.visualization.Plotter to display it.\n\n Returns\n ----------\n :return: dict\n Dictionary with the evolution of the hypervolume indicator, number of solutions on the non-dominated\n front and best values for each objective function.\n :return str\n Scores used to evaluate fitness.\n \"\"\"\n scores = [fitness_func.score for fitness_func in self.fitness]\n if self.optimize_features:\n scores.append(\"Num. 
features\")\n\n return self._evolution, scores\n\n def _evaluate_fitness(self, population: Population):\n \"\"\"\n Function that evaluates the values of the objective functions for each of the individuals.\n A solution will be assigned to the fitness value of each individual\n\n Parameters\n ------------\n :param population: pywin.population.Population\n Population of individuals.\n\n Returns\n ----------\n :return: pywin.population.Population\n Population.\n \"\"\"\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population\n\n def _annotate(self, generation: int):\n \"\"\"\n Record the values to be monitored in the algorithm for each generation\n\n Parameters\n ------------\n :param generation: int\n \"\"\"\n # Get pareto front\n pareto_front_scores = np.array(\n [individual.fitness.values for individual in self._population.individuals\n if individual.fitness.rank == 0]\n )\n\n # Calculate hypervolume\n self._evolution['hypervolume'][generation + 1] = hypervolume(pareto_front=pareto_front_scores)\n\n # Get number of solutions on the Pareto front\n self._evolution['num_solutions_front'][generation + 1] = len(pareto_front_scores)\n\n # Get best performance achieved for each objective\n self._evolution['best_values'][generation + 1] = np.max(pareto_front_scores, axis=0)\n\n def _set_new_population(self, parents_offspring: Population):\n \"\"\"\n Select the new population using the crowding operator until fill the population size. Individuals with a\n lower rank value (better solutions) will be selected. For individuals with the same rank value, those\n with a higher crowding distance will be selected.\n\n Parameters\n -----------\n :param parents_offspring: pywin.population.Population\n \"\"\"\n # Get fitness\n parents_offspring_fitness = parents_offspring.fitness\n\n # Get individual indices sorted by fitness\n indices = np.argsort(parents_offspring_fitness)[:(self._population.size - 1):-1]\n\n # Get best individuals and their fitness\n best_individuals = [parents_offspring.individuals[idx] for idx in indices]\n\n # Assign best individuals to population\n self._population.set_new_individuals(best_individuals)\n\n def _core(self, generation_start: int, generation_end: int):\n \"\"\"\n Main logic of the algorithm. First, evaluate the individual fitness (creating solutions). Second, applies\n the \"fast non dominated sort\" algorithm to assign the solutions to different non-dominated fronts and\n calculates the crowding distance. Then apply the basic operations (selection, crossover and mutation)\n to generate offspring. 
And finally evaluate the fitness of the offspring, merge the offspring with the\n parents and fill the next generation with the best solutions.\n\n Parameters\n ------------\n :param generation_start: int\n Generation from which the algorithm should start\n :param generation_end: int\n Generation until which the algorithm must arrive.\n \"\"\"\n\n # Evaluate fitness for each objective\n self._population = self._evaluate_fitness(population=self._population)\n\n #  Get population fitness\n population_fitness = self._population.fitness\n\n # Sort Pareto front\n fast_non_dominated_sort(population_fitness)\n\n # Calculate crowding\n calculate_crowding(population_fitness)\n\n info = \"(NSGAII) Generations (form %d to %d)\" % (generation_start, generation_end)\n for generation in tqdm(range(generation_start, generation_end), desc=info):\n\n # Annotate algorithm performance\n self._annotate(generation=generation)\n\n # Apply selection\n offspring = self.selection.select(population=self._population,\n new_pop_length=self._population.size)\n\n # Apply cross-over\n offspring = self.crossover.cross_population(offspring)\n\n # Introduces mutations\n if self.mutation is not None:\n offspring = self.mutation.mutate(population=offspring, mutation_rate=self.mutation_rate)\n\n # Evaluate offspring\n offspring = self._evaluate_fitness(population=offspring)\n\n # Restart parent solutions\n restart_solutions(self._population.fitness)\n\n # Merge parents and offspring\n parents_offspring = self._population.merge_population(self._population, offspring)\n\n #  Get parents_offspring fitness\n parents_offspring_fitness = parents_offspring.fitness\n\n # Sort Pareto front\n fast_non_dominated_sort(parents_offspring_fitness)\n\n # Calculate crowding\n calculate_crowding(parents_offspring_fitness)\n\n # Set new population\n self._set_new_population(parents_offspring)\n" ]
[ [ "numpy.random.seed", "numpy.isnan", "numpy.random.shuffle", "numpy.argsort", "numpy.array" ], [ "numpy.argsort", "numpy.max", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
francois-rozet/lampe
[ "50e53c767ee5d98502ec8520b3bca554f2169eb7" ]
[ "lampe/simulators/hh.py" ]
[ "r\"\"\"Hodgkin-Huxley (HH) benchmark.\n\nHH is a widespread non-linear mechanistic model of neural dynamics.\n\nReferences:\n A quantitative description of membrane current and its application to conduction and excitation in nerve\n (Hodgkin et al., 1952)\n https://link.springer.com/article/10.1007/BF02459568\n\n Training deep neural density estimators to identify mechanistic models of neural dynamics\n (Gonçalves et al., 2020)\n https://elifesciences.org/articles/56261\n\nShapes:\n theta: :math:`(8,)`.\n x: :math:`(7,)`.\n\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom numpy import ndarray as Array\nfrom torch import Tensor, BoolTensor\nfrom typing import *\n\nfrom . import Simulator\n\n\nLABELS = [\n f'${l}$' for l in [\n r'g_{\\mathrm{Na}}', r'g_{\\mathrm{K}}', r'g_{\\mathrm{M}}', 'g_l',\n r'\\tau_{\\max}', 'V_t', r'\\sigma', 'E_l',\n ]\n]\n\nLOWER, UPPER = torch.tensor([\n [0.5, 80.], # g_Na [mS/cm^2]\n [1e-4, 15.], # g_K [mS/cm^2]\n [1e-4, .6], # g_M [mS/cm^2]\n [1e-4, .6], # g_l [mS/cm^2]\n [50., 3000.], # tau_max [ms]\n [-90., -40.], # V_t [mV]\n [1e-4, .15], # sigma [uA/cm^2]\n [-100., -35.], # E_l [mV]\n]).t()\n\n\nclass HH(Simulator):\n r\"\"\"Creates an Hodgkin-Huxley (HH) simulator.\n\n Arguments:\n summary: Whether voltage traces are converted to summary statistics or not.\n seed: A random number generator seed.\n kwargs: Simulator settings and constants (e.g. duration, inital voltage, ...).\n \"\"\"\n\n def __init__(self, summary: bool = True, seed: int = None, **kwargs):\n super().__init__()\n\n # Constants\n default = {\n 'duration': 80., # s\n 'time_step': 0.02, # s\n 'padding': 10., # s\n 'initial_voltage': -70., # mV\n 'current': 5e-4 / (np.pi * 7e-3 ** 2), # uA / cm^2\n }\n\n self.constants = {\n k: kwargs.get(k, v)\n for k, v in default.items()\n }\n\n # Summary statistics\n self.summary = summary\n\n # RNG\n self.rng = np.random.default_rng(seed)\n\n def __call__(self, theta: Array) -> Array:\n x = voltage_trace(theta, self.constants, self.rng)\n\n if self.summary:\n x = summarize(x, self.constants)\n\n return x\n\n\ndef voltage_trace(\n theta: Array,\n constants: Dict[str, float],\n rng: np.random.Generator,\n) -> Array:\n r\"\"\"Simulates an Hodgkin-Huxley voltage trace.\n\n References:\n https://github.com/mackelab/sbi/blob/main/examples/HH_helper_functions.py\n \"\"\"\n\n # Parameters\n T = constants['duration']\n dt = constants['time_step']\n pad = constants['padding']\n V_0 = constants['initial_voltage']\n I = constants['current']\n\n theta = np.expand_dims(theta, axis=0)\n g_Na, g_K, g_M, g_leak, tau_max, V_t, sigma, E_leak = [\n theta[..., i] for i in range(8)\n ]\n\n C = 1. # uF/cm^2\n E_Na = 53. # mV\n E_K = -107. 
# mV\n\n # Kinetics\n exp = np.exp\n efun = lambda x: np.where(\n np.abs(x) < 1e-4,\n 1 - x / 2,\n x / (exp(x) - 1)\n )\n\n alpha_n = lambda x: 0.032 * efun(-0.2 * (x - 15)) / 0.2\n beta_n = lambda x: 0.5 * exp(-(x - 10) / 40)\n tau_n = lambda x: 1 / (alpha_n(x) + beta_n(x))\n n_inf = lambda x: alpha_n(x) / (alpha_n(x) + beta_n(x))\n\n alpha_m = lambda x: 0.32 * efun(-0.25 * (x - 13)) / 0.25\n beta_m = lambda x: 0.28 * efun(0.2 * (x - 40)) / 0.2\n tau_m = lambda x: 1 / (alpha_m(x) + beta_m(x))\n m_inf = lambda x: alpha_m(x) / (alpha_m(x) + beta_m(x))\n\n alpha_h = lambda x: 0.128 * exp(-(x - 17) / 18)\n beta_h = lambda x: 4 / (1 + exp(-0.2 * (x - 40)))\n tau_h = lambda x: 1 / (alpha_h(x) + beta_h(x))\n h_inf = lambda x: alpha_h(x) / (alpha_h(x) + beta_h(x))\n\n tau_p = lambda x: tau_max / (3.3 * exp(0.05 * (x + 35)) + exp(-0.05 * (x + 35)))\n p_inf = lambda x: 1 / (1 + exp(-0.1 * (x + 35)))\n\n # Iterations\n voltages = []\n\n V = np.full_like(V_t, V_0)\n V_rel = V - V_t\n\n n = n_inf(V_rel)\n m = m_inf(V_rel)\n h = h_inf(V_rel)\n p = p_inf(V)\n\n for t in np.arange(0, T, dt):\n tau_V = C / (\n g_Na * m**3 * h\n + g_K * n**4\n + g_M * p\n + g_leak\n )\n\n V_inf = tau_V * (\n E_Na * g_Na * m**3 * h\n + E_K * g_K * n**4\n + E_K * g_M * p\n + E_leak * g_leak\n + I * (pad <= t < T - pad)\n + sigma * rng.standard_normal(V.shape) / dt**0.5\n ) / C\n\n V = V_inf + (V - V_inf) * exp(-dt / tau_V)\n V_rel = V - V_t\n\n n = n_inf(V_rel) + (n - n_inf(V_rel)) * exp(-dt / tau_n(V_rel))\n m = m_inf(V_rel) + (m - m_inf(V_rel)) * exp(-dt / tau_m(V_rel))\n h = h_inf(V_rel) + (h - h_inf(V_rel)) * exp(-dt / tau_h(V_rel))\n p = p_inf(V) + (p - p_inf(V)) * exp(-dt / tau_p(V))\n\n voltages.append(V)\n\n return np.stack(voltages, axis=-1).squeeze(axis=0)\n\n\ndef summarize(x: Array, constants: Dict[str, float]) -> Array:\n r\"\"\"Returns summary statistics of a voltage trace.\"\"\"\n\n # Constants\n T = constants['duration']\n dt = constants['time_step']\n pad = constants['padding']\n\n t = np.arange(0, T, dt)\n\n # Number of spikes\n spikes = np.maximum(x, -10)\n spikes = np.diff(np.sign(np.diff(spikes)))\n spikes = np.sum(spikes < 0, axis=-1)\n\n # Resting moments\n rest = x[..., (pad / 2 <= t) * (t < pad)]\n rest_mean = np.mean(rest, axis=-1)\n rest_std = np.std(rest, axis=-1)\n\n # Moments\n x = x[..., (pad <= t) * (t < T - pad)]\n x_mean = np.mean(x, axis=-1)\n x_std = np.std(x, axis=-1)\n\n z = (x - x_mean[..., None]) / x_std[..., None]\n\n x_skew = np.mean(z**3, axis=-1)\n x_kurtosis = np.mean(z**4, axis=-1)\n\n return np.stack([\n spikes,\n rest_mean, rest_std,\n x_mean, x_std, x_skew, x_kurtosis,\n ], axis=-1)\n" ]
[ [ "numpy.expand_dims", "numpy.maximum", "numpy.abs", "numpy.arange", "numpy.stack", "torch.tensor", "numpy.full_like", "numpy.std", "numpy.mean", "numpy.diff", "numpy.sum", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edawson/SigProfilerExtractor
[ "a9cc43ddba2271ec68ff4d1a6a93399386713325" ]
[ "build/lib/SigProfilerExtractor/nmf_gpu.py" ]
[ "\"\"\"\nImplementation of non-negative matrix factorization for GPU\n\"\"\"\n\nfrom datetime import datetime\n\nfrom nimfa.methods.seeding import nndsvd\nimport numpy as np\nimport torch\nimport torch.nn\nfrom torch import nn\n\n\nclass NMF:\n def __init__(self, V, rank, max_iterations=100000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,\n init_method='random', floating_point_precision='double', min_iterations=2000):\n\n \"\"\"\n Run non-negative matrix factorisation using GPU. Uses beta-divergence.\n\n Args:\n V: Matrix to be factorised\n rank: (int) number of latent dimensnions to use in factorisation\n max_iterations: (int) Maximum number of update iterations to use during fitting\n tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence\n test_conv: (int) How often to test for convergnce\n gpu_id: (int) Which GPU device to use\n seed: random seed, if None (default) datetime is used\n init_method: how to initialise basis and coefficient matrices, options are:\n - random (will always be the same if seed != None)\n - NNDSVD\n - NNDSVDa (fill in the zero elements with the average),\n - NNDSVDar (fill in the zero elements with random values in the space [0:average/100]).\n floating_point_precision: (string or type). Can be `double`, `float` or any type/string which\n torch can interpret.\n min_iterations: the minimum number of iterations to execute before termination. Useful when using\n fp32 tensors as convergence can happen too early.\n \"\"\"\n torch.cuda.set_device(gpu_id)\n\n if seed is None:\n seed = datetime.now().timestamp()\n\n if floating_point_precision == 'float':\n self._tensor_type = torch.FloatTensor\n elif floating_point_precision == 'double':\n self._tensor_type = torch.DoubleTensor\n else:\n self._tensor_type = floating_point_precision\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n self.max_iterations = max_iterations\n self.min_iterations = min_iterations\n\n # If V is not in a batch, put it in a batch of 1\n if len(V.shape) == 2:\n V = V[None, :, :]\n\n self._V = V.type(self._tensor_type).cuda()\n self._fix_neg = nn.Threshold(0., 1e-8)\n self._tolerance = tolerance\n self._prev_loss = None\n self._iter = 0\n self._test_conv = test_conv\n self._gpu_id = gpu_id\n self._rank = rank\n self._W, self._H = self._initialise_wh(init_method)\n\n def _initialise_wh(self, init_method):\n \"\"\"\n Initialise basis and coefficient matrices according to `init_method`\n \"\"\"\n if init_method == 'random':\n W = torch.rand(self._V.shape[0], self._V.shape[1], self._rank).type(self._tensor_type).cuda()\n H = torch.rand(self._V.shape[0], self._rank, self._V.shape[2]).type(self._tensor_type).cuda()\n return W, H\n\n elif init_method == 'NNDSVD':\n nv = nndsvd.Nndsvd()\n vin = np.mat(self._V.cpu().numpy())\n W, H = nv.initialize(vin, self._rank, options={'flag': 0})\n\n elif init_method == 'NNDSVDa':\n nv = nndsvd.Nndsvd()\n vin = np.mat(self._V.cpu().numpy())\n W, H = nv.initialize(vin, self._rank, options={'flag': 1})\n\n elif init_method == 'NNDSVDar':\n nv = nndsvd.Nndsvd()\n vin = np.mat(self._V.cpu().numpy())\n W, H = nv.initialize(vin, self._rank, options={'flag': 2})\n\n W = torch.from_numpy(W).type(self._tensor_type).cuda(self._gpu_id)\n H = torch.from_numpy(H).type(self._tensor_type).cuda(self._gpu_id)\n return W, H\n\n @property\n def reconstruction(self):\n return self.W @ self.H\n\n @property\n def W(self):\n return self._W\n\n @property\n def H(self):\n return self._H\n\n @property\n def _kl_loss(self):\n 
return (self._V * (self._V / self.reconstruction).log()).sum() - self._V.sum() + self.reconstruction.sum()\n\n @property\n def _loss_converged(self):\n \"\"\"\n Check if loss has converged\n \"\"\"\n if not self._iter:\n self._loss_init = self._kl_loss\n elif ((self._prev_loss - self._kl_loss) / self._loss_init) < self._tolerance:\n return True\n self._prev_loss = self._kl_loss\n return False\n\n def fit(self, beta=1):\n \"\"\"\n Fit the basis (W) and coefficient (H) matrices to the input matrix (V) using multiplicative updates and\n beta divergence\n Args:\n beta: value to use for generalised beta divergence. Default is 1 for KL divergence\n beta == 2 => Euclidean updates\n beta == 1 => Generalised Kullback-Leibler updates\n beta == 0 => Itakura-Saito updates\n \"\"\"\n with torch.no_grad():\n def stop_iterations():\n stop = (self._V.shape[0] == 1) and \\\n (self._iter % self._test_conv == 0) and \\\n self._loss_converged and \\\n (self._iter > self.min_iterations)\n if stop:\n print(\"loss converged with {} iterations\".format(self._iter))\n return stop\n\n if beta == 2:\n for self._iter in range(self.max_iterations):\n self.H = self.H * (self.W.transpose(1, 2) @ self._V) / (self.W.transpose(1, 2) @ (self.W @ self.H))\n self.W = self.W * (self._V @ self.H.transpose(1, 2)) / (self.W @ (self.H @ self.H.transpose(1, 2)))\n if stop_iterations():\n break\n\n # Optimisations for the (common) beta=1 (KL) case.\n elif beta == 1:\n ones = torch.ones(self._V.shape).type(self._tensor_type).cuda(self._gpu_id)\n for self._iter in range(self.max_iterations):\n ht = self.H.transpose(1, 2)\n numerator = (self._V / (self.W @ self.H)) @ ht\n\n denomenator = ones @ ht\n self._W *= numerator / denomenator\n\n wt = self.W.transpose(1, 2)\n numerator = wt @ (self._V / (self.W @ self.H))\n denomenator = wt @ ones\n self._H *= numerator / denomenator\n if stop_iterations():\n break\n\n else:\n for self._iter in range(self.max_iterations):\n self.H = self.H * ((self.W.transpose(1, 2) @ (((self.W @ self.H) ** (beta - 2)) * self._V)) /\n (self.W.transpose(1, 2) @ ((self.W @ self.H)**(beta-1))))\n self.W = self.W * (((([email protected])**(beta-2) * self._V) @ self.H.transpose(1, 2)) /\n (((self.W @ self.H) ** (beta - 1)) @ self.H.transpose(1, 2)))\n if stop_iterations():\n break\n" ]
[ [ "torch.ones", "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.manual_seed", "torch.from_numpy", "torch.no_grad", "torch.rand", "torch.nn.Threshold" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neil-tan/utensor_cgen
[ "ffaf692bf6d1f8572039ad7e82e695f97b050cd2" ]
[ "tests/test_ir/test_NameAttrListConverter/conftest.py" ]
[ "import pytest\nfrom tensorflow.core.framework.attr_value_pb2 import AttrValue, NameAttrList\n\n\[email protected](scope='session')\ndef name_attr_list():\n attr = {\n 'float': AttrValue(f=3.14159),\n 'list': AttrValue(list=AttrValue.ListValue(b=[True, False, True]))\n }\n return NameAttrList(name='test_name_attr_list', attr=attr)\n" ]
[ [ "tensorflow.core.framework.attr_value_pb2.NameAttrList", "tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue", "tensorflow.core.framework.attr_value_pb2.AttrValue" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pantelisantonoudiou/pandas
[ "328e5b61e4a6edbe232e7528d5070b701350c93a" ]
[ "pandas/core/series.py" ]
[ "\"\"\"\nData structure for 1-dimensional cross-sectional and time series data\n\"\"\"\nfrom __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Iterable,\n Literal,\n Sequence,\n Union,\n cast,\n overload,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import (\n lib,\n properties,\n reshape,\n tslibs,\n)\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axis,\n Dtype,\n DtypeObj,\n FillnaOptions,\n IndexKeyFunc,\n SingleManager,\n StorageOptions,\n TimedeltaConvertibleTypes,\n TimestampConvertibleTypes,\n ValueKeyFunc,\n npt,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n deprecate_nonkeyword_arguments,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import (\n validate_ascending,\n validate_bool_kwarg,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.cast import (\n convert_dtypes,\n maybe_box_native,\n maybe_cast_pointwise_result,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_dict_like,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n pandas_dtype,\n validate_all_hashable,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n notna,\n remove_na_arraylike,\n)\n\nfrom pandas.core import (\n algorithms,\n base,\n generic,\n missing,\n nanops,\n ops,\n)\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.apply import SeriesApply\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.categorical import CategoricalAccessor\nfrom pandas.core.arrays.sparse import SparseAccessor\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n create_series_with_explicit_dtype,\n extract_array,\n is_empty_data,\n sanitize_array,\n)\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.indexers import (\n deprecate_ndim_indexing,\n unpack_1tuple,\n)\nfrom pandas.core.indexes.accessors import CombinedDatetimelikeProperties\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n DatetimeIndex,\n Float64Index,\n Index,\n MultiIndex,\n PeriodIndex,\n TimedeltaIndex,\n default_index,\n ensure_index,\n)\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexing import check_bool_indexer\nfrom pandas.core.internals import (\n SingleArrayManager,\n SingleBlockManager,\n)\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.core.sorting import (\n ensure_key_mapped,\n nargsort,\n)\nfrom pandas.core.strings import StringMethods\nfrom pandas.core.tools.datetimes import to_datetime\n\nimport pandas.io.formats.format as fmt\nfrom pandas.io.formats.info import (\n INFO_DOCSTRING,\n SeriesInfo,\n series_sub_kwargs,\n)\nimport pandas.plotting\n\nif TYPE_CHECKING:\n\n from pandas._typing import (\n NumpySorter,\n NumpyValueArrayLike,\n )\n\n from pandas.core.frame import DataFrame\n from pandas.core.groupby.generic import SeriesGroupBy\n from pandas.core.resample import Resampler\n\n__all__ = [\"Series\"]\n\n_shared_doc_kwargs = {\n \"axes\": \"index\",\n \"klass\": \"Series\",\n \"axes_single_arg\": \"{0 or 'index'}\",\n \"axis\": \"\"\"axis : {0 or 
'index'}\n Parameter needed for compatibility with DataFrame.\"\"\",\n \"inplace\": \"\"\"inplace : bool, default False\n If True, performs operation inplace and returns None.\"\"\",\n \"unique\": \"np.ndarray\",\n \"duplicated\": \"Series\",\n \"optional_by\": \"\",\n \"optional_mapper\": \"\",\n \"optional_labels\": \"\",\n \"optional_axis\": \"\",\n \"replace_iloc\": \"\"\"\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\"\"\",\n}\n\n\ndef _coerce_method(converter):\n \"\"\"\n Install the scalar coercion methods.\n \"\"\"\n\n def wrapper(self):\n if len(self) == 1:\n return converter(self.iloc[0])\n raise TypeError(f\"cannot convert the series to {converter}\")\n\n wrapper.__name__ = f\"__{converter.__name__}__\"\n return wrapper\n\n\n# ----------------------------------------------------------------------\n# Series class\n\n\nclass Series(base.IndexOpsMixin, generic.NDFrame):\n \"\"\"\n One-dimensional ndarray with axis labels (including time series).\n\n Labels need not be unique but must be a hashable type. The object\n supports both integer- and label-based indexing and provides a host of\n methods for performing operations involving the index. Statistical\n methods from ndarray have been overridden to automatically exclude\n missing data (currently represented as NaN).\n\n Operations between Series (+, -, /, \\\\*, \\\\*\\\\*) align values based on their\n associated index values-- they need not be the same length. The result\n index will be the sorted union of the two indexes.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series. If data is a dict, argument order is\n maintained.\n index : array-like or Index (1d)\n Values must be hashable and have the same length as `data`.\n Non-unique index values are allowed. Will default to\n RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like\n and index is None, then the keys in the data are used as the index. If the\n index is not None, the resulting Series is reindexed with the index values.\n dtype : str, numpy.dtype, or ExtensionDtype, optional\n Data type for the output Series. If not specified, this will be\n inferred from `data`.\n See the :ref:`user guide <basics.dtypes>` for more usages.\n name : str, optional\n The name to give to the Series.\n copy : bool, default False\n Copy input data. Only affects Series or 1d ndarray input. 
See examples.\n\n Examples\n --------\n Constructing Series from a dictionary with an Index specified\n\n >>> d = {'a': 1, 'b': 2, 'c': 3}\n >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])\n >>> ser\n a 1\n b 2\n c 3\n dtype: int64\n\n The keys of the dictionary match with the Index values, hence the Index\n values have no effect.\n\n >>> d = {'a': 1, 'b': 2, 'c': 3}\n >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])\n >>> ser\n x NaN\n y NaN\n z NaN\n dtype: float64\n\n Note that the Index is first build with the keys from the dictionary.\n After this the Series is reindexed with the given Index values, hence we\n get all NaN as a result.\n\n Constructing Series from a list with `copy=False`.\n\n >>> r = [1, 2]\n >>> ser = pd.Series(r, copy=False)\n >>> ser.iloc[0] = 999\n >>> r\n [1, 2]\n >>> ser\n 0 999\n 1 2\n dtype: int64\n\n Due to input data type the Series has a `copy` of\n the original data even though `copy=False`, so\n the data is unchanged.\n\n Constructing Series from a 1d ndarray with `copy=False`.\n\n >>> r = np.array([1, 2])\n >>> ser = pd.Series(r, copy=False)\n >>> ser.iloc[0] = 999\n >>> r\n array([999, 2])\n >>> ser\n 0 999\n 1 2\n dtype: int64\n\n Due to input data type the Series has a `view` on\n the original data, so\n the data is changed as well.\n \"\"\"\n\n _typ = \"series\"\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n _name: Hashable\n _metadata: list[str] = [\"name\"]\n _internal_names_set = {\"index\"} | generic.NDFrame._internal_names_set\n _accessors = {\"dt\", \"cat\", \"str\", \"sparse\"}\n _hidden_attrs = (\n base.IndexOpsMixin._hidden_attrs\n | generic.NDFrame._hidden_attrs\n | frozenset([\"compress\", \"ptp\"])\n )\n\n # Override cache_readonly bc Series is mutable\n # error: Incompatible types in assignment (expression has type \"property\",\n # base class \"IndexOpsMixin\" defined the type as \"Callable[[IndexOpsMixin], bool]\")\n hasnans = property( # type: ignore[assignment]\n # error: \"Callable[[IndexOpsMixin], bool]\" has no attribute \"fget\"\n base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined]\n doc=base.IndexOpsMixin.hasnans.__doc__,\n )\n _mgr: SingleManager\n div: Callable[[Series, Any], Series]\n rdiv: Callable[[Series, Any], Series]\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index=None,\n dtype: Dtype | None = None,\n name=None,\n copy: bool = False,\n fastpath: bool = False,\n ):\n\n if (\n isinstance(data, (SingleBlockManager, SingleArrayManager))\n and index is None\n and dtype is None\n and copy is False\n ):\n # GH#33357 called with just the SingleBlockManager\n NDFrame.__init__(self, data)\n if fastpath:\n # e.g. 
from _box_col_values, skip validation of name\n object.__setattr__(self, \"_name\", name)\n else:\n self.name = name\n return\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n if not isinstance(data, (SingleBlockManager, SingleArrayManager)):\n manager = get_option(\"mode.data_manager\")\n if manager == \"block\":\n data = SingleBlockManager.from_array(data, index)\n elif manager == \"array\":\n data = SingleArrayManager.from_array(data, index)\n if copy:\n data = data.copy()\n if index is None:\n index = data.index\n\n else:\n\n name = ibase.maybe_extract_name(name, data, type(self))\n\n if is_empty_data(data) and dtype is None:\n # gh-17261\n warnings.warn(\n \"The default dtype for empty Series will be 'object' instead \"\n \"of 'float64' in a future version. Specify a dtype explicitly \"\n \"to silence this warning.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n # uncomment the line below when removing the FutureWarning\n # dtype = np.dtype(object)\n\n if index is not None:\n index = ensure_index(index)\n\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, MultiIndex):\n raise NotImplementedError(\n \"initializing a Series from a MultiIndex is not supported\"\n )\n elif isinstance(data, Index):\n\n if dtype is not None:\n # astype copies\n data = data.astype(dtype)\n else:\n # GH#24096 we need to ensure the index remains immutable\n data = data._values.copy()\n copy = False\n\n elif isinstance(data, np.ndarray):\n if len(data.dtype):\n # GH#13296 we are dealing with a compound dtype, which\n # should be treated as 2D\n raise ValueError(\n \"Cannot construct a Series from an ndarray with \"\n \"compound dtype. Use DataFrame instead.\"\n )\n elif isinstance(data, Series):\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n copy = False\n data = data._mgr\n elif is_dict_like(data):\n data, index = self._init_dict(data, index, dtype)\n dtype = None\n copy = False\n elif isinstance(data, (SingleBlockManager, SingleArrayManager)):\n if index is None:\n index = data.index\n elif not data.index.equals(index) or copy:\n # GH#19275 SingleBlockManager input should only be called\n # internally\n raise AssertionError(\n \"Cannot pass both SingleBlockManager \"\n \"`data` argument and a different \"\n \"`index` argument. 
`copy` must be False.\"\n )\n\n elif isinstance(data, ExtensionArray):\n pass\n else:\n data = com.maybe_iterable_to_list(data)\n\n if index is None:\n if not is_list_like(data):\n data = [data]\n index = default_index(len(data))\n elif is_list_like(data):\n com.require_length_match(data, index)\n\n # create/copy the manager\n if isinstance(data, (SingleBlockManager, SingleArrayManager)):\n if dtype is not None:\n data = data.astype(dtype=dtype, errors=\"ignore\", copy=copy)\n elif copy:\n data = data.copy()\n else:\n data = sanitize_array(data, index, dtype, copy)\n\n manager = get_option(\"mode.data_manager\")\n if manager == \"block\":\n data = SingleBlockManager.from_array(data, index)\n elif manager == \"array\":\n data = SingleArrayManager.from_array(data, index)\n\n generic.NDFrame.__init__(self, data)\n self.name = name\n self._set_axis(0, index, fastpath=True)\n\n def _init_dict(\n self, data, index: Index | None = None, dtype: DtypeObj | None = None\n ):\n \"\"\"\n Derive the \"_mgr\" and \"index\" attributes of a new Series from a\n dictionary input.\n\n Parameters\n ----------\n data : dict or dict-like\n Data used to populate the new Series.\n index : Index or None, default None\n Index for the new Series: if None, use dict keys.\n dtype : np.dtype, ExtensionDtype, or None, default None\n The dtype for the new Series: if None, infer from data.\n\n Returns\n -------\n _data : BlockManager for the new Series\n index : index for the new Series\n \"\"\"\n keys: Index | tuple\n\n # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]\n # raises KeyError), so we iterate the entire dict, and align\n if data:\n # GH:34717, issue was using zip to extract key and values from data.\n # using generators in effects the performance.\n # Below is the new way of extracting the keys and values\n\n keys = tuple(data.keys())\n values = list(data.values()) # Generating list of values- faster way\n elif index is not None:\n # fastpath for Series(data=None). Just use broadcasting a scalar\n # instead of reindexing.\n values = na_value_for_dtype(pandas_dtype(dtype), compat=False)\n keys = index\n else:\n keys, values = (), []\n\n # Input is now list-like, so rely on \"standard\" construction:\n\n # TODO: passing np.float64 to not break anything yet. See GH-17261\n s = create_series_with_explicit_dtype(\n # error: Argument \"index\" to \"create_series_with_explicit_dtype\" has\n # incompatible type \"Tuple[Any, ...]\"; expected \"Union[ExtensionArray,\n # ndarray, Index, None]\"\n values,\n index=keys, # type: ignore[arg-type]\n dtype=dtype,\n dtype_if_empty=np.float64,\n )\n\n # Now we just make sure the order is respected, if any\n if data and index is not None:\n s = s.reindex(index, copy=False)\n return s._mgr, s.index\n\n # ----------------------------------------------------------------------\n\n @property\n def _constructor(self) -> type[Series]:\n return Series\n\n @property\n def _constructor_expanddim(self) -> type[DataFrame]:\n \"\"\"\n Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n from pandas.core.frame import DataFrame\n\n return DataFrame\n\n # types\n @property\n def _can_hold_na(self) -> bool:\n return self._mgr._can_hold_na\n\n def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:\n \"\"\"\n Override generic, we want to set the _typ here.\n\n This is called from the cython code when we set the `index` attribute\n directly, e.g. 
`series.index = [1, 2, 3]`.\n \"\"\"\n if not fastpath:\n labels = ensure_index(labels)\n\n if labels._is_all_dates:\n deep_labels = labels\n if isinstance(labels, CategoricalIndex):\n deep_labels = labels.categories\n\n if not isinstance(\n deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)\n ):\n try:\n labels = DatetimeIndex(labels)\n # need to set here because we changed the index\n if fastpath:\n self._mgr.set_axis(axis, labels)\n except (tslibs.OutOfBoundsDatetime, ValueError):\n # labels may exceeds datetime bounds,\n # or not be a DatetimeIndex\n pass\n\n if not fastpath:\n # The ensure_index call above ensures we have an Index object\n self._mgr.set_axis(axis, labels)\n\n # ndarray compatibility\n @property\n def dtype(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._mgr.dtype\n\n @property\n def dtypes(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n # DataFrame compatibility\n return self.dtype\n\n @property\n def name(self) -> Hashable:\n \"\"\"\n Return the name of the Series.\n\n The name of a Series becomes its index or column name if it is used\n to form a DataFrame. It is also used whenever displaying the Series\n using the interpreter.\n\n Returns\n -------\n label (hashable object)\n The name of the Series, also the column name if part of a DataFrame.\n\n See Also\n --------\n Series.rename : Sets the Series name when given a scalar input.\n Index.name : Corresponding Index property.\n\n Examples\n --------\n The Series name can be set initially when calling the constructor.\n\n >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Numbers, dtype: int64\n >>> s.name = \"Integers\"\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Integers, dtype: int64\n\n The name of a Series within a DataFrame is its column name.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],\n ... columns=[\"Odd Numbers\", \"Even Numbers\"])\n >>> df\n Odd Numbers Even Numbers\n 0 1 2\n 1 3 4\n 2 5 6\n >>> df[\"Even Numbers\"].name\n 'Even Numbers'\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Hashable) -> None:\n validate_all_hashable(value, error_name=f\"{type(self).__name__}.name\")\n object.__setattr__(self, \"_name\", value)\n\n @property\n def values(self):\n \"\"\"\n Return Series as ndarray or ndarray-like depending on the dtype.\n\n .. warning::\n\n We recommend using :attr:`Series.array` or\n :meth:`Series.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n\n See Also\n --------\n Series.array : Reference to the underlying data.\n Series.to_numpy : A NumPy array representing the underlying data.\n\n Examples\n --------\n >>> pd.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> pd.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n\n >>> pd.Series(list('aabc')).astype('category').values\n ['a', 'a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n\n Timezone aware datetime data is converted to UTC:\n\n >>> pd.Series(pd.date_range('20130101', periods=3,\n ... 
tz='US/Eastern')).values\n array(['2013-01-01T05:00:00.000000000',\n '2013-01-02T05:00:00.000000000',\n '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')\n \"\"\"\n return self._mgr.external_values()\n\n @property\n def _values(self):\n \"\"\"\n Return the internal repr of this data (defined by Block.interval_values).\n This are the values as stored in the Block (ndarray or ExtensionArray\n depending on the Block class), with datetime64[ns] and timedelta64[ns]\n wrapped in ExtensionArrays to match Index._values behavior.\n\n Differs from the public ``.values`` for certain data types, because of\n historical backwards compatibility of the public attribute (e.g. period\n returns object ndarray and datetimetz a datetime64[ns] ndarray for\n ``.values`` while it returns an ExtensionArray for ``._values`` in those\n cases).\n\n Differs from ``.array`` in that this still returns the numpy array if\n the Block is backed by a numpy array (except for datetime64 and\n timedelta64 dtypes), while ``.array`` ensures to always return an\n ExtensionArray.\n\n Overview:\n\n dtype | values | _values | array |\n ----------- | ------------- | ------------- | ------------- |\n Numeric | ndarray | ndarray | PandasArray |\n Category | Categorical | Categorical | Categorical |\n dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |\n Period | ndarray[obj] | PeriodArray | PeriodArray |\n Nullable | EA | EA | EA |\n\n \"\"\"\n return self._mgr.internal_values()\n\n # error: Decorated property not supported\n @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]\n @property\n def array(self) -> ExtensionArray:\n return self._mgr.array_values()\n\n # ops\n def ravel(self, order=\"C\"):\n \"\"\"\n Return the flattened underlying data as an ndarray.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n Flattened data of the Series.\n\n See Also\n --------\n numpy.ndarray.ravel : Return a flattened array.\n \"\"\"\n return self._values.ravel(order=order)\n\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Series.\n \"\"\"\n return len(self._mgr)\n\n def view(self, dtype: Dtype | None = None) -> Series:\n \"\"\"\n Create a new view of the Series.\n\n This function will return a new Series with a view of the same\n underlying values in memory, optionally reinterpreted with a new data\n type. The new data type must preserve the same size in bytes as to not\n cause index misalignment.\n\n Parameters\n ----------\n dtype : data type\n Data type object or one of their string representations.\n\n Returns\n -------\n Series\n A new Series object as a view of the same data in memory.\n\n See Also\n --------\n numpy.ndarray.view : Equivalent numpy function to create a new view of\n the same data in memory.\n\n Notes\n -----\n Series are instantiated with ``dtype=float64`` by default. 
While\n ``numpy.ndarray.view()`` will return a view with the same data type as\n the original array, ``Series.view()`` (without specified dtype)\n will try using ``float64`` and may fail if the original data type size\n in bytes is not the same.\n\n Examples\n --------\n >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')\n >>> s\n 0 -2\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n\n The 8 bit signed integer representation of `-1` is `0b11111111`, but\n the same bytes represent 255 if read as an 8 bit unsigned integer:\n\n >>> us = s.view('uint8')\n >>> us\n 0 254\n 1 255\n 2 0\n 3 1\n 4 2\n dtype: uint8\n\n The views share the same underlying values:\n\n >>> us[0] = 128\n >>> s\n 0 -128\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n \"\"\"\n # self.array instead of self._values so we piggyback on PandasArray\n # implementation\n res_values = self.array.view(dtype)\n res_ser = self._constructor(res_values, index=self.index)\n return res_ser.__finalize__(self, method=\"view\")\n\n # ----------------------------------------------------------------------\n # NDArray Compat\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:\n \"\"\"\n Return the values as a NumPy array.\n\n Users should not call this directly. Rather, it is invoked by\n :func:`numpy.array` and :func:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to use for the resulting NumPy array. By default,\n the dtype is inferred from the data.\n\n Returns\n -------\n numpy.ndarray\n The values in the series converted to a :class:`numpy.ndarray`\n with the specified `dtype`.\n\n See Also\n --------\n array : Create a new array from data.\n Series.array : Zero-copy view to the array backing the Series.\n Series.to_numpy : Series method for similar behavior.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2, 3])\n >>> np.asarray(ser)\n array([1, 2, 3])\n\n For timezone-aware data, the timezones may be retained with\n ``dtype='object'``\n\n >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> np.asarray(tzser, dtype=\"object\")\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET')],\n dtype=object)\n\n Or the values may be localized to UTC and the tzinfo discarded with\n ``dtype='datetime64[ns]'``\n\n >>> np.asarray(tzser, dtype=\"datetime64[ns]\") # doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', ...],\n dtype='datetime64[ns]')\n \"\"\"\n return np.asarray(self._values, dtype)\n\n # ----------------------------------------------------------------------\n # Unary Methods\n\n # coercion\n __float__ = _coerce_method(float)\n __long__ = _coerce_method(int)\n __int__ = _coerce_method(int)\n\n # ----------------------------------------------------------------------\n\n # indexers\n @property\n def axes(self) -> list[Index]:\n \"\"\"\n Return a list of the row axis labels.\n \"\"\"\n return [self.index]\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n @Appender(generic.NDFrame.take.__doc__)\n def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. 
\"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n nv.validate_take((), kwargs)\n\n indices = ensure_platform_int(indices)\n new_index = self.index.take(indices)\n new_values = self._values.take(indices)\n\n result = self._constructor(new_values, index=new_index, fastpath=True)\n return result.__finalize__(self, method=\"take\")\n\n def _take_with_is_copy(self, indices, axis=0) -> Series:\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning). For Series this does the same\n as the public take (it never sets `_is_copy`).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n return self.take(indices=indices, axis=axis)\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Return the i-th value or values in the Series by location.\n\n Parameters\n ----------\n i : int\n\n Returns\n -------\n scalar (int) or Series (slice, sequence)\n \"\"\"\n return self._values[i]\n\n def _slice(self, slobj: slice, axis: int = 0) -> Series:\n # axis kwarg is retained for compat with NDFrame method\n # _slice is *always* positional\n return self._get_values(slobj)\n\n def __getitem__(self, key):\n key = com.apply_if_callable(key, self)\n\n if key is Ellipsis:\n return self\n\n key_is_scalar = is_scalar(key)\n if isinstance(key, (list, tuple)):\n key = unpack_1tuple(key)\n\n if is_integer(key) and self.index._should_fallback_to_positional:\n return self._values[key]\n\n elif key_is_scalar:\n return self._get_value(key)\n\n if is_hashable(key):\n # Otherwise index.get_value will raise InvalidIndexError\n try:\n # For labels that don't resolve as scalars like tuples and frozensets\n result = self._get_value(key)\n\n return result\n\n except (KeyError, TypeError):\n if isinstance(key, tuple) and isinstance(self.index, MultiIndex):\n # We still have the corner case where a tuple is a key\n # in the first level of our MultiIndex\n return self._get_values_tuple(key)\n\n if is_iterator(key):\n key = list(key)\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n return self._get_values(key)\n\n return self._get_with(key)\n\n def _get_with(self, key):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n # _convert_slice_indexer to determine if this slice is positional\n # or label based, and if the latter, convert to positional\n slobj = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._slice(slobj)\n elif isinstance(key, ABCDataFrame):\n raise TypeError(\n \"Indexing a Series with DataFrame is not \"\n \"supported, use the appropriate DataFrame column\"\n )\n elif isinstance(key, tuple):\n return self._get_values_tuple(key)\n\n elif not is_list_like(key):\n # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684\n return self.loc[key]\n\n if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):\n key = list(key)\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: The key_type == \"boolean\" case should be caught by the\n # com.is_bool_indexer check in __getitem__\n if key_type == \"integer\":\n # We need to decide whether to treat this as a positional indexer\n # (i.e. self.iloc) or label-based (i.e. 
self.loc)\n if not self.index._should_fallback_to_positional:\n return self.loc[key]\n else:\n return self.iloc[key]\n\n # handle the dup indexing case GH#4246\n return self.loc[key]\n\n def _get_values_tuple(self, key):\n # mpl hackaround\n if com.any_none(*key):\n result = self._get_values(key)\n deprecate_ndim_indexing(result, stacklevel=find_stack_level())\n return result\n\n if not isinstance(self.index, MultiIndex):\n raise KeyError(\"key of type tuple not found and not a MultiIndex\")\n\n # If key is contained, would have returned by now\n indexer, new_index = self.index.get_loc_level(key)\n return self._constructor(self._values[indexer], index=new_index).__finalize__(\n self\n )\n\n def _get_values(self, indexer):\n try:\n new_mgr = self._mgr.getitem_mgr(indexer)\n return self._constructor(new_mgr).__finalize__(self)\n except ValueError:\n # mpl compat if we look up e.g. ser[:, np.newaxis];\n # see tests.series.timeseries.test_mpl_compat_hack\n # the asarray is needed to avoid returning a 2D DatetimeArray\n return np.asarray(self._values[indexer])\n\n def _get_value(self, label, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed index label.\n\n Parameters\n ----------\n label : object\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n scalar value\n \"\"\"\n if takeable:\n return self._values[label]\n\n # Similar to Index.get_value, but we do not fall back to positional\n loc = self.index.get_loc(label)\n return self.index._get_values_for_loc(self, loc, label)\n\n def __setitem__(self, key, value) -> None:\n key = com.apply_if_callable(key, self)\n cacher_needs_updating = self._check_is_chained_assignment_possible()\n\n if key is Ellipsis:\n key = slice(None)\n\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._set_values(indexer, value)\n\n try:\n self._set_with_engine(key, value)\n except (KeyError, ValueError):\n if is_integer(key) and self.index.inferred_type != \"integer\":\n # positional setter\n if not self.index._should_fallback_to_positional:\n # GH#33469\n warnings.warn(\n \"Treating integers as positional in Series.__setitem__ \"\n \"with a Float64Index is deprecated. In a future version, \"\n \"`series[an_int] = val` will insert a new key into the \"\n \"Series. 
Use `series.iloc[an_int] = val` to treat the \"\n \"key as positional.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n # this is equivalent to self._values[key] = value\n self._mgr.setitem_inplace(key, value)\n else:\n # GH#12862 adding a new key to the Series\n self.loc[key] = value\n\n except (InvalidIndexError, TypeError) as err:\n if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):\n # cases with MultiIndex don't get here bc they raise KeyError\n raise KeyError(\n \"key of type tuple not found and not a MultiIndex\"\n ) from err\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n\n if (\n is_list_like(value)\n and len(value) != len(self)\n and not isinstance(value, Series)\n and not is_object_dtype(self.dtype)\n ):\n # Series will be reindexed to have matching length inside\n # _where call below\n # GH#44265\n indexer = key.nonzero()[0]\n self._set_values(indexer, value)\n return\n\n # otherwise with listlike other we interpret series[mask] = other\n # as series[mask] = other[mask]\n try:\n self._where(~key, value, inplace=True)\n except InvalidIndexError:\n # test_where_dups\n self.iloc[key] = value\n return\n\n else:\n self._set_with(key, value)\n\n if cacher_needs_updating:\n self._maybe_update_cacher()\n\n def _set_with_engine(self, key, value) -> None:\n loc = self.index.get_loc(key)\n # error: Argument 1 to \"validate_numeric_casting\" has incompatible type\n # \"Union[dtype, ExtensionDtype]\"; expected \"dtype\"\n validate_numeric_casting(self.dtype, value) # type: ignore[arg-type]\n # this is equivalent to self._values[key] = value\n self._mgr.setitem_inplace(loc, value)\n\n def _set_with(self, key, value):\n # other: fancy integer or otherwise\n assert not isinstance(key, tuple)\n\n if is_scalar(key):\n key = [key]\n elif is_iterator(key):\n # Without this, the call to infer_dtype will consume the generator\n key = list(key)\n\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: key_type == \"boolean\" should not occur because that\n # should be caught by the is_bool_indexer check in __setitem__\n if key_type == \"integer\":\n if not self.index._should_fallback_to_positional:\n self._set_labels(key, value)\n else:\n self._set_values(key, value)\n else:\n self.loc[key] = value\n\n def _set_labels(self, key, value) -> None:\n key = com.asarray_tuplesafe(key)\n indexer: np.ndarray = self.index.get_indexer(key)\n mask = indexer == -1\n if mask.any():\n raise KeyError(f\"{key[mask]} not in index\")\n self._set_values(indexer, value)\n\n def _set_values(self, key, value) -> None:\n if isinstance(key, (Index, Series)):\n key = key._values\n\n self._mgr = self._mgr.setitem(indexer=key, value=value)\n self._maybe_update_cacher()\n\n def _set_value(self, label, value, takeable: bool = False):\n \"\"\"\n Quickly set single value at passed label.\n\n If label is not contained, a new object is created with the label\n placed at the end of the result index.\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed.\n value : object\n Scalar value.\n takeable : interpret the index as indexers, default False\n \"\"\"\n if not takeable:\n try:\n loc = self.index.get_loc(label)\n except KeyError:\n # set using a non-recursive method\n self.loc[label] = value\n return\n else:\n loc = label\n\n self._set_values(loc, value)\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n @property\n def _is_cached(self) -> bool:\n 
\"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _reset_cacher(self) -> None:\n \"\"\"\n Reset the cacher.\n \"\"\"\n if hasattr(self, \"_cacher\"):\n # should only get here with self.ndim == 1\n del self._cacher\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"\n Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _clear_item_cache(self) -> None:\n # no-op for Series\n pass\n\n def _check_is_chained_assignment_possible(self) -> bool:\n \"\"\"\n See NDFrame._check_is_chained_assignment_possible.__doc__\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(t=\"referent\", force=True)\n return True\n return super()._check_is_chained_assignment_possible()\n\n def _maybe_update_cacher(\n self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False\n ) -> None:\n \"\"\"\n See NDFrame._maybe_update_cacher.__doc__\n \"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n assert self.ndim == 1\n ref: DataFrame = cacher[1]()\n\n # we are trying to reference a dead referent, hence\n # a copy\n if ref is None:\n del self._cacher\n elif len(self) == len(ref) and self.name in ref.columns:\n # GH#42530 self.name must be in ref.columns\n # to ensure column still in dataframe\n # otherwise, either self or ref has swapped in new arrays\n ref._maybe_cache_changed(cacher[0], self, inplace=inplace)\n else:\n # GH#33675 we have swapped in a new array, so parent\n # reference to self is now invalid\n ref._item_cache.pop(cacher[0], None)\n\n super()._maybe_update_cacher(\n clear=clear, verify_is_copy=verify_is_copy, inplace=inplace\n )\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n @property\n def _is_mixed_type(self):\n return False\n\n def repeat(self, repeats, axis=None) -> Series:\n \"\"\"\n Repeat elements of a Series.\n\n Returns a new Series where each element of the current Series\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n Series.\n axis : None\n Must be ``None``. 
Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n Series\n Newly created Series with repeated elements.\n\n See Also\n --------\n Index.repeat : Equivalent function for Index.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> s.repeat(2)\n 0 a\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n dtype: object\n >>> s.repeat([1, 2, 3])\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n 2 c\n dtype: object\n \"\"\"\n nv.validate_repeat((), {\"axis\": axis})\n new_index = self.index.repeat(repeats)\n new_values = self._values.repeat(repeats)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"repeat\"\n )\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"level\"])\n def reset_index(self, level=None, drop=False, name=lib.no_default, inplace=False):\n \"\"\"\n Generate a new DataFrame or Series with the index reset.\n\n This is useful when the index needs to be treated as a column, or\n when the index is meaningless and needs to be reset to the default\n before another operation.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default optional\n For a Series with a MultiIndex, only remove the specified levels\n from the index. Removes all levels by default.\n drop : bool, default False\n Just reset the index, without inserting it as a column in\n the new DataFrame.\n name : object, optional\n The name to use for the column containing the original Series\n values. Uses ``self.name`` by default. This argument is ignored\n when `drop` is True.\n inplace : bool, default False\n Modify the Series in place (do not create a new object).\n\n Returns\n -------\n Series or DataFrame or None\n When `drop` is False (the default), a DataFrame is returned.\n The newly created columns will come first in the DataFrame,\n followed by the original Series values.\n When `drop` is True, a `Series` is returned.\n In either case, if ``inplace=True``, no value is returned.\n\n See Also\n --------\n DataFrame.reset_index: Analogous function for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4], name='foo',\n ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))\n\n Generate a DataFrame with default index.\n\n >>> s.reset_index()\n idx foo\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To specify the name of the new column use `name`.\n\n >>> s.reset_index(name='values')\n idx values\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To generate a new Series with the default set `drop` to True.\n\n >>> s.reset_index(drop=True)\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n To update the Series in place, without generating a new one\n set `inplace` to True. Note that it also requires ``drop=True``.\n\n >>> s.reset_index(inplace=True, drop=True)\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n The `level` parameter is interesting for Series with a multi-level\n index.\n\n >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),\n ... np.array(['one', 'two', 'one', 'two'])]\n >>> s2 = pd.Series(\n ... range(4), name='foo',\n ... index=pd.MultiIndex.from_arrays(arrays,\n ... 
names=['a', 'b']))\n\n To remove a specific level from the Index, use `level`.\n\n >>> s2.reset_index(level='a')\n a foo\n b\n one bar 0\n two bar 1\n one baz 2\n two baz 3\n\n If `level` is not set, all levels are removed from the Index.\n\n >>> s2.reset_index()\n a b foo\n 0 bar one 0\n 1 bar two 1\n 2 baz one 2\n 3 baz two 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if drop:\n new_index = default_index(len(self))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if inplace:\n self.index = new_index\n else:\n return self._constructor(\n self._values.copy(), index=new_index\n ).__finalize__(self, method=\"reset_index\")\n elif inplace:\n raise TypeError(\n \"Cannot reset_index inplace on a Series to create a DataFrame\"\n )\n else:\n if name is lib.no_default:\n # For backwards compatibility, keep columns as [0] instead of\n # [None] when self.name is None\n if self.name is None:\n name = 0\n else:\n name = self.name\n\n df = self.to_frame(name)\n return df.reset_index(level=level, drop=drop)\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular Series.\n \"\"\"\n repr_params = fmt.get_series_repr_params()\n return self.to_string(**repr_params)\n\n def to_string(\n self,\n buf=None,\n na_rep=\"NaN\",\n float_format=None,\n header=True,\n index=True,\n length=False,\n dtype=False,\n name=False,\n max_rows=None,\n min_rows=None,\n ):\n \"\"\"\n Render a string representation of the Series.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n na_rep : str, optional\n String representation of NaN to use, default 'NaN'.\n float_format : one-parameter function, optional\n Formatter function to apply to columns' elements if they are\n floats, default None.\n header : bool, default True\n Add the Series header (index name).\n index : bool, optional\n Add index (row) labels, default True.\n length : bool, default False\n Add the Series length.\n dtype : bool, default False\n Add the Series dtype.\n name : bool, default False\n Add the Series name if not None.\n max_rows : int, optional\n Maximum number of rows to show before truncating. 
If None, show\n all.\n min_rows : int, optional\n The number of rows to display in a truncated repr (when number\n of rows is above `max_rows`).\n\n Returns\n -------\n str or None\n String representation of Series if ``buf=None``, otherwise None.\n \"\"\"\n formatter = fmt.SeriesFormatter(\n self,\n name=name,\n length=length,\n header=header,\n index=index,\n dtype=dtype,\n na_rep=na_rep,\n float_format=float_format,\n min_rows=min_rows,\n max_rows=max_rows,\n )\n result = formatter.to_string()\n\n # catch contract violations\n if not isinstance(result, str):\n raise AssertionError(\n \"result must be of type str, type \"\n f\"of result is {repr(type(result).__name__)}\"\n )\n\n if buf is None:\n return result\n else:\n try:\n buf.write(result)\n except AttributeError:\n with open(buf, \"w\") as f:\n f.write(result)\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=generic._shared_docs[\"storage_options\"],\n examples=dedent(\n \"\"\"Examples\n --------\n >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(s.to_markdown(tablefmt=\"grid\"))\n +----+----------+\n | | animal |\n +====+==========+\n | 0 | elk |\n +----+----------+\n | 1 | pig |\n +----+----------+\n | 2 | dog |\n +----+----------+\n | 3 | quetzal |\n +----+----------+\"\"\"\n ),\n )\n def to_markdown(\n self,\n buf: IO[str] | None = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> str | None:\n \"\"\"\n Print {klass} in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened, \"wt\" by default.\n index : bool, optional, default True\n Add index (row) labels.\n\n .. versionadded:: 1.1.0\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n **kwargs\n These parameters will be passed to `tabulate \\\n <https://pypi.org/project/tabulate>`_.\n\n Returns\n -------\n str\n {klass} in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n {examples}\n \"\"\"\n return self.to_frame().to_markdown(\n buf, mode, index, storage_options=storage_options, **kwargs\n )\n\n # ----------------------------------------------------------------------\n\n def items(self) -> Iterable[tuple[Hashable, Any]]:\n \"\"\"\n Lazily iterate over (index, value) tuples.\n\n This method returns an iterable tuple (index, value). This is\n convenient if you want to create a lazy iterator.\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'C'])\n >>> for index, value in s.items():\n ... 
print(f\"Index : {index}, Value : {value}\")\n Index : 0, Value : A\n Index : 1, Value : B\n Index : 2, Value : C\n \"\"\"\n return zip(iter(self.index), iter(self))\n\n @Appender(items.__doc__)\n def iteritems(self) -> Iterable[tuple[Hashable, Any]]:\n return self.items()\n\n # ----------------------------------------------------------------------\n # Misc public methods\n\n def keys(self) -> Index:\n \"\"\"\n Return alias for index.\n\n Returns\n -------\n Index\n Index of the Series.\n \"\"\"\n return self.index\n\n def to_dict(self, into=dict):\n \"\"\"\n Convert Series to {label -> value} dict or dict-like object.\n\n Parameters\n ----------\n into : class, default dict\n The collections.abc.Mapping subclass to use as the return\n object. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n collections.abc.Mapping\n Key-value representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_dict()\n {0: 1, 1: 2, 2: 3, 3: 4}\n >>> from collections import OrderedDict, defaultdict\n >>> s.to_dict(OrderedDict)\n OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])\n >>> dd = defaultdict(list)\n >>> s.to_dict(dd)\n defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})\n \"\"\"\n # GH16122\n into_c = com.standardize_mapping(into)\n return into_c((k, maybe_box_native(v)) for k, v in self.items())\n\n def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:\n \"\"\"\n Convert Series to DataFrame.\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([\"a\", \"b\", \"c\"],\n ... name=\"vals\")\n >>> s.to_frame()\n vals\n 0 a\n 1 b\n 2 c\n \"\"\"\n columns: Index\n if name is lib.no_default:\n name = self.name\n if name is None:\n # default to [0], same as we would get with DataFrame(self)\n columns = default_index(1)\n else:\n columns = Index([name])\n else:\n columns = Index([name])\n\n mgr = self._mgr.to_2d_mgr(columns)\n return self._constructor_expanddim(mgr)\n\n def _set_name(self, name, inplace=False) -> Series:\n \"\"\"\n Set the Series name.\n\n Parameters\n ----------\n name : str\n inplace : bool\n Whether to modify `self` directly or return a copy.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ser = self if inplace else self.copy()\n ser.name = name\n return ser\n\n @Appender(\n \"\"\"\nExamples\n--------\n>>> ser = pd.Series([390., 350., 30., 20.],\n... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name=\"Max Speed\")\n>>> ser\nFalcon 390.0\nFalcon 350.0\nParrot 30.0\nParrot 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby([\"a\", \"b\", \"a\", \"b\"]).mean()\na 210.0\nb 185.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(ser > 100).mean()\nMax Speed\nFalse 25.0\nTrue 370.0\nName: Max Speed, dtype: float64\n\n**Grouping by Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... 
['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> ser = pd.Series([390., 350., 30., 20.], index=index, name=\"Max Speed\")\n>>> ser\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nAnimal\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=\"Type\").mean()\nType\nCaptive 210.0\nWild 185.0\nName: Max Speed, dtype: float64\n\nWe can also choose to include `NA` in group keys or not by defining\n`dropna` parameter, the default setting is `True`.\n\n>>> ser = pd.Series([1, 2, 3, 3], index=[\"a\", 'a', 'b', np.nan])\n>>> ser.groupby(level=0).sum()\na 3\nb 3\ndtype: int64\n\n>>> ser.groupby(level=0, dropna=False).sum()\na 3\nb 3\nNaN 3\ndtype: int64\n\n>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']\n>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name=\"Max Speed\")\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan]).mean()\na 210.0\nb 350.0\nName: Max Speed, dtype: float64\n\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan], dropna=False).mean()\na 210.0\nb 350.0\nNaN 20.0\nName: Max Speed, dtype: float64\n\"\"\"\n )\n @Appender(generic._shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool | lib.NoDefault = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> SeriesGroupBy:\n from pandas.core.groupby.generic import SeriesGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n # error: Argument \"squeeze\" to \"SeriesGroupBy\" has incompatible type\n # \"Union[bool, NoDefault]\"; expected \"bool\"\n return SeriesGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze, # type: ignore[arg-type]\n observed=observed,\n dropna=dropna,\n )\n\n # ----------------------------------------------------------------------\n # Statistics, overridden ndarray methods\n\n # TODO: integrate bottleneck\n\n def count(self, level=None):\n \"\"\"\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n -------\n int or Series (if level specified)\n Number of non-null values in the Series.\n\n See Also\n --------\n DataFrame.count : Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> s = pd.Series([0.0, 1.0, np.nan])\n >>> s.count()\n 2\n \"\"\"\n if level is None:\n return notna(self._values).sum().astype(\"int64\")\n else:\n warnings.warn(\n \"Using the level keyword in DataFrame and Series aggregations is \"\n \"deprecated and will be removed in a future version. Use groupby \"\n \"instead. 
ser.count(level=1) should use ser.groupby(level=1).count().\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if not isinstance(self.index, MultiIndex):\n raise ValueError(\"Series.count level is only valid with a MultiIndex\")\n\n index = self.index\n assert isinstance(index, MultiIndex) # for mypy\n\n if isinstance(level, str):\n level = index._get_level_number(level)\n\n lev = index.levels[level]\n level_codes = np.array(index.codes[level], subok=False, copy=True)\n\n mask = level_codes == -1\n if mask.any():\n level_codes[mask] = cnt = len(lev)\n lev = lev.insert(cnt, lev._na_value)\n\n obs = level_codes[notna(self._values)]\n out = np.bincount(obs, minlength=len(lev) or None)\n return self._constructor(out, index=lev, dtype=\"int64\").__finalize__(\n self, method=\"count\"\n )\n\n def mode(self, dropna: bool = True) -> Series:\n \"\"\"\n Return the mode(s) of the Series.\n\n The mode is the value that appears most often. There can be multiple modes.\n\n Always returns Series even if only one value is returned.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n Returns\n -------\n Series\n Modes of the Series in sorted order.\n \"\"\"\n # TODO: Add option for bins like value_counts()\n return algorithms.mode(self, dropna=dropna)\n\n def unique(self) -> ArrayLike:\n \"\"\"\n Return unique values of Series object.\n\n Uniques are returned in order of appearance. Hash table-based unique,\n therefore does NOT sort.\n\n Returns\n -------\n ndarray or ExtensionArray\n The unique values returned as a NumPy array. See Notes.\n\n See Also\n --------\n unique : Top-level unique method for any 1-d array-like object.\n Index.unique : Return Index with unique values from an Index object.\n\n Notes\n -----\n Returns the unique values as a NumPy array. In case of an\n extension-array backed Series, a new\n :class:`~api.extensions.ExtensionArray` of that type with just\n the unique values is returned. This includes\n\n * Categorical\n * Period\n * Datetime with Timezone\n * Interval\n * Sparse\n * IntegerNA\n\n See Examples section.\n\n Examples\n --------\n >>> pd.Series([2, 1, 3, 3], name='A').unique()\n array([2, 1, 3])\n\n >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()\n array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')\n\n >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')\n ... for _ in range(3)]).unique()\n <DatetimeArray>\n ['2016-01-01 00:00:00-05:00']\n Length: 1, dtype: datetime64[ns, US/Eastern]\n\n An Categorical will return categories in the order of\n appearance and with the same dtype.\n\n >>> pd.Series(pd.Categorical(list('baabc'))).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['a', 'b', 'c']\n >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),\n ... ordered=True)).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['a' < 'b' < 'c']\n \"\"\"\n return super().unique()\n\n @overload\n def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:\n ...\n\n @overload\n def drop_duplicates(self, keep, inplace: Literal[True]) -> None:\n ...\n\n @overload\n def drop_duplicates(self, *, inplace: Literal[True]) -> None:\n ...\n\n @overload\n def drop_duplicates(self, keep=..., inplace: bool = ...) 
-> Series | None:\n ...\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def drop_duplicates(self, keep=\"first\", inplace=False) -> Series | None:\n \"\"\"\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series or None\n Series with duplicates dropped or None if ``inplace=True``.\n\n See Also\n --------\n Index.drop_duplicates : Equivalent method on Index.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Series.duplicated : Related method on Series, indicating duplicate\n Series values.\n\n Examples\n --------\n Generate a Series with duplicated entries.\n\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the 'keep' parameter, the selection behaviour of duplicated values\n can be changed. The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> s.drop_duplicates()\n 0 lama\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n\n The value 'last' for parameter 'keep' keeps the last occurrence for\n each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last')\n 1 cow\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n The value ``False`` for parameter 'keep' discards all sets of\n duplicated entries. Setting the value of 'inplace' to ``True`` performs\n the operation inplace and returns ``None``.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = super().drop_duplicates(keep=keep)\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(self, keep=\"first\") -> Series:\n \"\"\"\n Indicate duplicate Series values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n Series. 
Either all duplicates, all except the first or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series[bool]\n Series indicating whether each value has occurred in the\n preceding values.\n\n See Also\n --------\n Index.duplicated : Equivalent method on pandas.Index.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Series.drop_duplicates : Remove duplicate values from Series.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set on False and all others on True:\n\n >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> animals.duplicated()\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n which is equivalent to\n\n >>> animals.duplicated(keep='first')\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> animals.duplicated(keep='last')\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> animals.duplicated(keep=False)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n \"\"\"\n res = self._duplicated(keep=keep)\n result = self._constructor(res, index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n def idxmin(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmin. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmin : Return indices of the minimum values\n along the given axis.\n DataFrame.idxmin : Return index of first occurrence of minimum\n over requested axis.\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 1],\n ... 
index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 1.0\n dtype: float64\n\n >>> s.idxmin()\n 'A'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n \"\"\"\n i = self.argmin(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def idxmax(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the maximum value.\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmax. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the maximum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmax : Return indices of the maximum values\n along the given axis.\n DataFrame.idxmax : Return index of first occurrence of maximum\n over requested axis.\n Series.idxmin : Return index *label* of the first occurrence\n of minimum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmax``. This method\n returns the label of the maximum, while ``ndarray.argmax`` returns\n the position. To get the position, use ``series.values.argmax()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 3, 4],\n ... index=['A', 'B', 'C', 'D', 'E'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 3.0\n E 4.0\n dtype: float64\n\n >>> s.idxmax()\n 'C'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmax(skipna=False)\n nan\n \"\"\"\n i = self.argmax(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def round(self, decimals=0, *args, **kwargs) -> Series:\n \"\"\"\n Round each value in a Series to the given number of decimals.\n\n Parameters\n ----------\n decimals : int, default 0\n Number of decimal places to round to. 
If decimals is negative,\n it specifies the number of positions to the left of the decimal point.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Series\n Rounded values of the Series.\n\n See Also\n --------\n numpy.around : Round values of an np.array.\n DataFrame.round : Round values of a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0.1, 1.3, 2.7])\n >>> s.round()\n 0 0.0\n 1 1.0\n 2 3.0\n dtype: float64\n \"\"\"\n nv.validate_round(args, kwargs)\n result = self._values.round(decimals)\n result = self._constructor(result, index=self.index).__finalize__(\n self, method=\"round\"\n )\n\n return result\n\n def quantile(self, q=0.5, interpolation=\"linear\"):\n \"\"\"\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n The quantile(s) to compute, which can lie in range: 0 <= q <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n float or Series\n If ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles, otherwise\n a float will be returned.\n\n See Also\n --------\n core.window.Rolling.quantile : Calculate the rolling quantile.\n numpy.percentile : Returns the q-th percentile(s) of the array elements.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.quantile(.5)\n 2.5\n >>> s.quantile([.25, .5, .75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n validate_percentile(q)\n\n # We dispatch to DataFrame so that core.internals only has to worry\n # about 2D cases.\n df = self.to_frame()\n\n result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)\n if result.ndim == 2:\n result = result.iloc[:, 0]\n\n if is_list_like(q):\n result.name = self.name\n return self._constructor(result, index=Float64Index(q), name=self.name)\n else:\n # scalar\n return result.iloc[0]\n\n def corr(self, other, method=\"pearson\", min_periods=None) -> float:\n \"\"\"\n Compute correlation with `other` Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the correlation.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method used to compute correlation:\n\n - pearson : Standard correlation coefficient\n - kendall : Kendall Tau correlation coefficient\n - spearman : Spearman rank correlation\n - callable: Callable with input two 1d ndarrays and returning a float.\n\n .. warning::\n Note that the returned matrix from corr will have 1 along the\n diagonals and will be symmetric regardless of the callable's\n behavior.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n\n Returns\n -------\n float\n Correlation with other.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation between columns.\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... v = np.minimum(a, b).sum().round(decimals=1)\n ... 
return v\n >>> s1 = pd.Series([.2, .0, .6, .2])\n >>> s2 = pd.Series([.3, .6, .0, .1])\n >>> s1.corr(s2, method=histogram_intersection)\n 0.3\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n\n if method in [\"pearson\", \"spearman\", \"kendall\"] or callable(method):\n return nanops.nancorr(\n this.values, other.values, method=method, min_periods=min_periods\n )\n\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n def cov(\n self,\n other: Series,\n min_periods: int | None = None,\n ddof: int | None = 1,\n ) -> float:\n \"\"\"\n Compute covariance with Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n float\n Covariance between Series and other normalized by N-1\n (unbiased estimator).\n\n See Also\n --------\n DataFrame.cov : Compute pairwise covariance of columns.\n\n Examples\n --------\n >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])\n >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])\n >>> s1.cov(s2)\n -0.01685762652715874\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n return nanops.nancov(\n this.values, other.values, min_periods=min_periods, ddof=ddof\n )\n\n @doc(\n klass=\"Series\",\n extra_params=\"\",\n other_klass=\"DataFrame\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> s = pd.Series([1, 1, 2, 3, 5, 8])\n >>> s.diff()\n 0 NaN\n 1 0.0\n 2 1.0\n 3 1.0\n 4 2.0\n 5 3.0\n dtype: float64\n\n Difference with 3rd previous row\n\n >>> s.diff(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 2.0\n 4 4.0\n 5 6.0\n dtype: float64\n\n Difference with following row\n\n >>> s.diff(periods=-1)\n 0 0.0\n 1 -1.0\n 2 -1.0\n 3 -2.0\n 4 -3.0\n 5 NaN\n dtype: float64\n\n Overflow in input dtype\n\n >>> s = pd.Series([1, 0], dtype=np.uint8)\n >>> s.diff()\n 0 NaN\n 1 255.0\n dtype: float64\"\"\"\n ),\n )\n def diff(self, periods: int = 1) -> Series:\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a {klass} element compared with another\n element in the {klass} (default is element in previous row).\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative\n values.\n {extra_params}\n Returns\n -------\n {klass}\n First differences of the Series.\n\n See Also\n --------\n {klass}.pct_change: Percent change over given number of periods.\n {klass}.shift: Shift index by desired number of periods with an\n optional time freq.\n {other_klass}.diff: First discrete difference of object.\n\n Notes\n -----\n For boolean dtypes, this uses :meth:`operator.xor` rather than\n :meth:`operator.sub`.\n The result is calculated according to current dtype in {klass},\n however dtype of the result is always float64.\n\n Examples\n --------\n {examples}\n \"\"\"\n result = algorithms.diff(self._values, periods)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"diff\"\n )\n\n def autocorr(self, lag=1) -> float:\n \"\"\"\n Compute the lag-N autocorrelation.\n\n This method computes the 
Pearson correlation between\n the Series and its shifted self.\n\n Parameters\n ----------\n lag : int, default 1\n Number of lags to apply before performing autocorrelation.\n\n Returns\n -------\n float\n The Pearson correlation between self and self.shift(lag).\n\n See Also\n --------\n Series.corr : Compute the correlation between two Series.\n Series.shift : Shift index by desired number of periods.\n DataFrame.corr : Compute pairwise correlation of columns.\n DataFrame.corrwith : Compute pairwise correlation between rows or\n columns of two DataFrame objects.\n\n Notes\n -----\n If the Pearson correlation is not well defined return 'NaN'.\n\n Examples\n --------\n >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])\n >>> s.autocorr() # doctest: +ELLIPSIS\n 0.10355...\n >>> s.autocorr(lag=2) # doctest: +ELLIPSIS\n -0.99999...\n\n If the Pearson correlation is not well defined, then 'NaN' is returned.\n\n >>> s = pd.Series([1, 0, 0, 0])\n >>> s.autocorr()\n nan\n \"\"\"\n return self.corr(self.shift(lag))\n\n def dot(self, other):\n \"\"\"\n Compute the dot product between the Series and the columns of other.\n\n This method computes the dot product between the Series and another\n one, or the Series and each columns of a DataFrame, or the Series and\n each columns of an array.\n\n It can also be called using `self @ other` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the dot product with its columns.\n\n Returns\n -------\n scalar, Series or numpy.ndarray\n Return the dot product of the Series and other if other is a\n Series, the Series of the dot product of Series and each rows of\n other if other is a DataFrame or a numpy.ndarray between the Series\n and each columns of the numpy array.\n\n See Also\n --------\n DataFrame.dot: Compute the matrix product with the DataFrame.\n Series.mul: Multiplication of series and other, element-wise.\n\n Notes\n -----\n The Series and other has to share the same index if other is a Series\n or a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0, 1, 2, 3])\n >>> other = pd.Series([-1, 2, -3, 4])\n >>> s.dot(other)\n 8\n >>> s @ other\n 8\n >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(df)\n 0 24\n 1 14\n dtype: int64\n >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(arr)\n array([24, 14])\n \"\"\"\n if isinstance(other, (Series, ABCDataFrame)):\n common = self.index.union(other.index)\n if len(common) > len(self.index) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(index=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[0] != rvals.shape[0]:\n raise Exception(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, ABCDataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=other.columns\n ).__finalize__(self, method=\"dot\")\n elif isinstance(other, Series):\n return np.dot(lvals, rvals)\n elif isinstance(rvals, np.ndarray):\n return np.dot(lvals, rvals)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n 
\"\"\"\n return self.dot(np.transpose(other))\n\n @doc(base.IndexOpsMixin.searchsorted, klass=\"Series\")\n # Signature of \"searchsorted\" incompatible with supertype \"IndexOpsMixin\"\n def searchsorted( # type: ignore[override]\n self,\n value: NumpyValueArrayLike | ExtensionArray,\n side: Literal[\"left\", \"right\"] = \"left\",\n sorter: NumpySorter = None,\n ) -> npt.NDArray[np.intp] | np.intp:\n return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter)\n\n # -------------------------------------------------------------------\n # Combination\n\n def append(\n self, to_append, ignore_index: bool = False, verify_integrity: bool = False\n ):\n \"\"\"\n Concatenate two or more Series.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n Series to append with self.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise Exception on creating index with duplicates.\n\n Returns\n -------\n Series\n Concatenated Series.\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n Iteratively appending to a Series can be more computationally intensive\n than a single concatenate. A better solution is to append values to a\n list and then concatenate the list with the original Series all at\n once.\n\n Examples\n --------\n >>> s1 = pd.Series([1, 2, 3])\n >>> s2 = pd.Series([4, 5, 6])\n >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])\n >>> s1.append(s2)\n 0 1\n 1 2\n 2 3\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s1.append(s3)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `ignore_index` set to True:\n\n >>> s1.append(s2, ignore_index=True)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `verify_integrity` set to True:\n\n >>> s1.append(s2, verify_integrity=True)\n Traceback (most recent call last):\n ...\n ValueError: Indexes have overlapping values: [0, 1, 2]\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n if isinstance(to_append, (list, tuple)):\n to_concat = [self]\n to_concat.extend(to_append)\n else:\n to_concat = [self, to_append]\n if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):\n msg = \"to_append should be a Series or list/tuple of Series, got DataFrame\"\n raise TypeError(msg)\n return concat(\n to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity\n )\n\n def _binop(self, other: Series, func, level=None, fill_value=None):\n \"\"\"\n Perform generic binary operation with optional fill value.\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. 
If both Series are NA in a\n location, the result will be NA regardless of the passed fill value.\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n\n Returns\n -------\n Series\n \"\"\"\n if not isinstance(other, Series):\n raise AssertionError(\"Other operand must be Series\")\n\n this = self\n\n if not self.index.equals(other.index):\n this, other = self.align(other, level=level, join=\"outer\", copy=False)\n\n this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value)\n\n with np.errstate(all=\"ignore\"):\n result = func(this_vals, other_vals)\n\n name = ops.get_op_result_name(self, other)\n return this._construct_result(result, name)\n\n def _construct_result(\n self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable\n ) -> Series | tuple[Series, Series]:\n \"\"\"\n Construct an appropriately-labelled Series from the result of an op.\n\n Parameters\n ----------\n result : ndarray or ExtensionArray\n name : Label\n\n Returns\n -------\n Series\n In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.\n \"\"\"\n if isinstance(result, tuple):\n # produced by divmod or rdivmod\n\n res1 = self._construct_result(result[0], name=name)\n res2 = self._construct_result(result[1], name=name)\n\n # GH#33427 assertions to keep mypy happy\n assert isinstance(res1, Series)\n assert isinstance(res2, Series)\n return (res1, res2)\n\n # We do not pass dtype to ensure that the Series constructor\n # does inference in the case where `result` has object-dtype.\n out = self._constructor(result, index=self.index)\n out = out.__finalize__(self)\n\n # Set the result's name after __finalize__ is called because __finalize__\n # would set it back to self.name\n out.name = name\n return out\n\n @doc(\n generic._shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nSeries or DataFrame\n If axis is 0 or 'index' the result will be a Series.\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\n If axis is 1 or 'columns' the result will be a DataFrame.\n It will have two columns namely 'self' and 'other'.\n\nSee Also\n--------\nDataFrame.compare : Compare with another DataFrame and show differences.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nExamples\n--------\n>>> s1 = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n>>> s2 = pd.Series([\"a\", \"a\", \"c\", \"b\", \"e\"])\n\nAlign the differences on columns\n\n>>> s1.compare(s2)\n self other\n1 b a\n3 d b\n\nStack the differences on indices\n\n>>> s1.compare(s2, align_axis=0)\n1 self b\n other a\n3 self d\n other b\ndtype: object\n\nKeep all original rows\n\n>>> s1.compare(s2, keep_shape=True)\n self other\n0 NaN NaN\n1 b a\n2 NaN NaN\n3 d b\n4 NaN NaN\n\nKeep all original rows and also all original values\n\n>>> s1.compare(s2, keep_shape=True, keep_equal=True)\n self other\n0 a a\n1 b a\n2 c c\n3 d b\n4 e e\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: Series,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> DataFrame | Series:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(self, other, func, fill_value=None) -> Series:\n \"\"\"\n Combine the Series with a Series or scalar according to `func`.\n\n Combine the Series and `other` using `func` to perform elementwise\n selection for combined Series.\n 
`fill_value` is assumed when value is missing at some index\n from one of the two objects being combined.\n\n Parameters\n ----------\n other : Series or scalar\n The value(s) to be combined with the `Series`.\n func : function\n Function that takes two scalars as inputs and returns an element.\n fill_value : scalar, optional\n The value to assume when an index is missing from\n one Series or the other. The default specifies to use the\n appropriate NaN value for the underlying dtype of the Series.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine_first : Combine Series values, choosing the calling\n Series' values first.\n\n Examples\n --------\n Consider 2 Datasets ``s1`` and ``s2`` containing\n highest clocked speeds of different birds.\n\n >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})\n >>> s1\n falcon 330.0\n eagle 160.0\n dtype: float64\n >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})\n >>> s2\n falcon 345.0\n eagle 200.0\n duck 30.0\n dtype: float64\n\n Now, to combine the two datasets and view the highest speeds\n of the birds across the two datasets\n\n >>> s1.combine(s2, max)\n duck NaN\n eagle 200.0\n falcon 345.0\n dtype: float64\n\n In the previous example, the resulting value for duck is missing,\n because the maximum of a NaN and a float is a NaN.\n So, in the example, we set ``fill_value=0``,\n so the maximum value returned will be the value from some dataset.\n\n >>> s1.combine(s2, max, fill_value=0)\n duck 30.0\n eagle 200.0\n falcon 345.0\n dtype: float64\n \"\"\"\n if fill_value is None:\n fill_value = na_value_for_dtype(self.dtype, compat=False)\n\n if isinstance(other, Series):\n # If other is a Series, result is based on union of Series,\n # so do this element by element\n new_index = self.index.union(other.index)\n new_name = ops.get_op_result_name(self, other)\n new_values = np.empty(len(new_index), dtype=object)\n for i, idx in enumerate(new_index):\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n with np.errstate(all=\"ignore\"):\n new_values[i] = func(lv, rv)\n else:\n # Assume that other is a scalar, so apply the function for\n # each element in the Series\n new_index = self.index\n new_values = np.empty(len(new_index), dtype=object)\n with np.errstate(all=\"ignore\"):\n new_values[:] = [func(lv, other) for lv in self._values]\n new_name = self.name\n\n # try_float=False is to match agg_series\n npvalues = lib.maybe_convert_objects(new_values, try_float=False)\n res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False)\n return self._constructor(res_values, index=new_index, name=new_name)\n\n def combine_first(self, other) -> Series:\n \"\"\"\n Update null elements with value in the same location in 'other'.\n\n Combine two Series objects by filling null values in one Series with\n non-null values from the other Series. 
Result index will be the union\n of the two indexes.\n\n Parameters\n ----------\n other : Series\n The value(s) to be used for filling null values.\n\n Returns\n -------\n Series\n The result of combining the provided Series with the other object.\n\n See Also\n --------\n Series.combine : Perform element-wise operation on two Series\n using a given function.\n\n Examples\n --------\n >>> s1 = pd.Series([1, np.nan])\n >>> s2 = pd.Series([3, 4, 5])\n >>> s1.combine_first(s2)\n 0 1.0\n 1 4.0\n 2 5.0\n dtype: float64\n\n Null values still persist if the location of that null value\n does not exist in `other`\n\n >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0})\n >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0})\n >>> s1.combine_first(s2)\n duck 30.0\n eagle 160.0\n falcon NaN\n dtype: float64\n \"\"\"\n new_index = self.index.union(other.index)\n this = self.reindex(new_index, copy=False)\n other = other.reindex(new_index, copy=False)\n if this.dtype.kind == \"M\" and other.dtype.kind != \"M\":\n other = to_datetime(other)\n\n return this.where(notna(this), other)\n\n def update(self, other) -> None:\n \"\"\"\n Modify Series in place using values from passed Series.\n\n Uses non-NA values from passed Series to make updates. Aligns\n on index.\n\n Parameters\n ----------\n other : Series, or object coercible into Series\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))\n >>> s\n 0 d\n 1 b\n 2 e\n dtype: object\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6, 7, 8]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n If ``other`` contains NaNs the corresponding values are not updated\n in the original Series.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, np.nan, 6]))\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n ``other`` can also be a non-Series object type\n that is coercible into a Series\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update([4, np.nan, 6])\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update({1: 9})\n >>> s\n 0 1\n 1 9\n 2 3\n dtype: int64\n \"\"\"\n\n if not isinstance(other, Series):\n other = Series(other)\n\n other = other.reindex_like(self)\n mask = notna(other)\n\n self._mgr = self._mgr.putmask(mask=mask, new=other)\n self._maybe_update_cacher()\n\n # ----------------------------------------------------------------------\n # Reindexing, sorting\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def sort_values(\n self,\n axis=0,\n ascending: bool | int | Sequence[bool | int] = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n \"\"\"\n Sort by the values.\n\n Sort a Series in ascending or descending order by some\n criterion.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Axis to direct sorting. The value 'index' is accepted for\n compatibility with DataFrame.sort_values.\n ascending : bool or list of bools, default True\n If True, sort values in ascending order, otherwise descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 
'mergesort' and 'stable' are the only stable algorithms.\n na_position : {'first' or 'last'}, default 'last'\n Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at\n the end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the series values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect a\n ``Series`` and return an array-like.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n Series ordered by values or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort by the Series indices.\n DataFrame.sort_values : Sort DataFrame by the values along either axis.\n DataFrame.sort_index : Sort DataFrame by indices.\n\n Examples\n --------\n >>> s = pd.Series([np.nan, 1, 3, 10, 5])\n >>> s\n 0 NaN\n 1 1.0\n 2 3.0\n 3 10.0\n 4 5.0\n dtype: float64\n\n Sort values ascending order (default behaviour)\n\n >>> s.sort_values(ascending=True)\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n 0 NaN\n dtype: float64\n\n Sort values descending order\n\n >>> s.sort_values(ascending=False)\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values inplace\n\n >>> s.sort_values(ascending=False, inplace=True)\n >>> s\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values putting NAs first\n\n >>> s.sort_values(na_position='first')\n 0 NaN\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n dtype: float64\n\n Sort a series of strings\n\n >>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])\n >>> s\n 0 z\n 1 b\n 2 d\n 3 a\n 4 c\n dtype: object\n\n >>> s.sort_values()\n 3 a\n 1 b\n 4 c\n 2 d\n 0 z\n dtype: object\n\n Sort using a key function. Your `key` function will be\n given the ``Series`` of values and should return an array-like.\n\n >>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])\n >>> s.sort_values()\n 1 B\n 3 D\n 0 a\n 2 c\n 4 e\n dtype: object\n >>> s.sort_values(key=lambda x: x.str.lower())\n 0 a\n 1 B\n 2 c\n 3 D\n 4 e\n dtype: object\n\n NumPy ufuncs work well here. For example, we can\n sort by the ``sin`` of the value\n\n >>> s = pd.Series([-4, -2, 0, 2, 4])\n >>> s.sort_values(key=np.sin)\n 1 -2\n 4 4\n 2 0\n 0 -4\n 3 2\n dtype: int64\n\n More complicated user-defined functions can be used,\n as long as they expect a Series and return an array-like\n\n >>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))\n 0 -4\n 3 2\n 4 4\n 1 -2\n 2 0\n dtype: int64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # GH 5856/5853\n if inplace and self._is_cached:\n raise ValueError(\n \"This Series is a view of some other array, to \"\n \"sort in-place you must create a copy\"\n )\n\n if is_list_like(ascending):\n ascending = cast(Sequence[Union[bool, int]], ascending)\n if len(ascending) != 1:\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) must be 1 for Series\"\n )\n ascending = ascending[0]\n\n ascending = validate_ascending(ascending)\n\n if na_position not in [\"first\", \"last\"]:\n raise ValueError(f\"invalid na_position: {na_position}\")\n\n # GH 35922. 
Make sorting stable by leveraging nargsort\n values_to_sort = ensure_key_mapped(self, key)._values if key else self._values\n sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position)\n\n result = self._constructor(\n self._values[sorted_index], index=self.index[sorted_index]\n )\n\n if ignore_index:\n result.index = default_index(len(sorted_index))\n\n if inplace:\n self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending: bool | int | Sequence[bool | int] = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort Series by index labels.\n\n Returns a new Series sorted by label if `inplace` argument is\n ``False``, otherwise updates the original series and returns None.\n\n Parameters\n ----------\n axis : int, default 0\n Axis to direct sorting. This can only be 0 for Series.\n level : int, optional\n If not None, sort on values in specified index level(s).\n ascending : bool or list-like of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n The original Series sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.sort_index: Sort DataFrame by the index.\n DataFrame.sort_values: Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])\n >>> s.sort_index()\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n Sort Descending\n\n >>> s.sort_index(ascending=False)\n 4 d\n 3 a\n 2 b\n 1 c\n dtype: object\n\n Sort Inplace\n\n >>> s.sort_index(inplace=True)\n >>> s\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n By default NaNs are put at the end, but use `na_position` to place\n them at the beginning\n\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])\n >>> s.sort_index(na_position='first')\n NaN d\n 1.0 c\n 2.0 b\n 3.0 a\n dtype: object\n\n Specify index level to sort\n\n >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',\n ... 'baz', 'baz', 'bar', 'bar']),\n ... np.array(['two', 'one', 'two', 'one',\n ... 'two', 'one', 'two', 'one'])]\n >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)\n >>> s.sort_index(level=1)\n bar one 8\n baz one 6\n foo one 4\n qux one 2\n bar two 7\n baz two 5\n foo two 3\n qux two 1\n dtype: int64\n\n Does not sort by remaining levels when sorting by levels\n\n >>> s.sort_index(level=1, sort_remaining=False)\n qux one 2\n foo one 4\n baz one 6\n bar one 8\n qux two 1\n foo two 3\n baz two 5\n bar two 7\n dtype: int64\n\n Apply a key function before sorting\n\n >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])\n >>> s.sort_index(key=lambda x : x.str.lower())\n A 1\n b 2\n C 3\n d 4\n dtype: int64\n \"\"\"\n\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def argsort(self, axis=0, kind=\"quicksort\", order=None) -> Series:\n \"\"\"\n Return the integer indices that would sort the Series values.\n\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Has no effect but is accepted for compatibility with numpy.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 
'mergesort' and 'stable' are the only stable algorithms.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n\n Returns\n -------\n Series[np.intp]\n Positions of values within the sort order with -1 indicating\n nan values.\n\n See Also\n --------\n numpy.ndarray.argsort : Returns the indices that would sort this array.\n \"\"\"\n values = self._values\n mask = isna(values)\n\n if mask.any():\n result = np.full(len(self), -1, dtype=np.intp)\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n else:\n result = np.argsort(values, kind=kind)\n\n res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp)\n return res.__finalize__(self, method=\"argsort\")\n\n def nlargest(self, n=5, keep=\"first\") -> Series:\n \"\"\"\n Return the largest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many descending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` largest values in the Series, sorted in decreasing order.\n\n See Also\n --------\n Series.nsmallest: Get the `n` smallest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values(ascending=False).head(n)`` for small `n`\n relative to the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Malta\": 434000, \"Maldives\": 434000,\n ... \"Brunei\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... \"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` largest elements where ``n=5`` by default.\n\n >>> s.nlargest()\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3``. Default `keep` value is 'first'\n so Malta will be kept.\n\n >>> s.nlargest(3)\n France 65000000\n Italy 59000000\n Malta 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` and keeping the last duplicates.\n Brunei will be kept since it is the last with value 434000 based on\n the index order.\n\n >>> s.nlargest(3, keep='last')\n France 65000000\n Italy 59000000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` with all duplicates kept. 
Note\n that the returned Series has five elements due to the three duplicates.\n\n >>> s.nlargest(3, keep='all')\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()\n\n def nsmallest(self, n: int = 5, keep: str = \"first\") -> Series:\n \"\"\"\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` smallest values in the Series, sorted in increasing order.\n\n See Also\n --------\n Series.nlargest: Get the `n` largest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Brunei\": 434000, \"Malta\": 434000,\n ... \"Maldives\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... \"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Brunei 434000\n Malta 434000\n Maldives 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` smallest elements where ``n=5`` by default.\n\n >>> s.nsmallest()\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Iceland 337000\n dtype: int64\n\n The `n` smallest elements where ``n=3``. Default `keep` value is\n 'first' so Nauru and Tuvalu will be kept.\n\n >>> s.nsmallest(3)\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` and keeping the last\n duplicates. Anguilla and Tuvalu will be kept since they are the last\n with value 11300 based on the index order.\n\n >>> s.nsmallest(3, keep='last')\n Montserrat 5200\n Anguilla 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` with all duplicates kept. Note\n that the returned Series has four elements due to the three duplicates.\n\n >>> s.nsmallest(3, keep='all')\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n extra_params=dedent(\n \"\"\"copy : bool, default True\n Whether to copy underlying data.\"\"\"\n ),\n examples=dedent(\n \"\"\"Examples\n --------\n >>> s = pd.Series(\n ... [\"A\", \"B\", \"A\", \"C\"],\n ... index=[\n ... [\"Final exam\", \"Final exam\", \"Coursework\", \"Coursework\"],\n ... [\"History\", \"Geography\", \"History\", \"Geography\"],\n ... [\"January\", \"February\", \"March\", \"April\"],\n ... ],\n ... )\n >>> s\n Final exam History January A\n Geography February B\n Coursework History March A\n Geography April C\n dtype: object\n\n In the following example, we will swap the levels of the indices.\n Here, we will swap the levels column-wise, but levels can be swapped row-wise\n in a similar manner. 
Note that column-wise is the default behaviour.\n By not supplying any arguments for i and j, we swap the last and second to\n last indices.\n\n >>> s.swaplevel()\n Final exam January History A\n February Geography B\n Coursework March History A\n April Geography C\n dtype: object\n\n By supplying one argument, we can choose which index to swap the last\n index with. We can for example swap the first index with the last one as\n follows.\n\n >>> s.swaplevel(0)\n January History Final exam A\n February Geography Final exam B\n March History Coursework A\n April Geography Coursework C\n dtype: object\n\n We can also define explicitly which indices we want to swap by supplying values\n for both i and j. Here, we for example swap the first and second indices.\n\n >>> s.swaplevel(0, 1)\n History Final exam January A\n Geography Final exam February B\n History Coursework March A\n Geography Coursework April C\n dtype: object\"\"\"\n ),\n )\n def swaplevel(self, i=-2, j=-1, copy=True) -> Series:\n \"\"\"\n Swap levels i and j in a :class:`MultiIndex`.\n\n Default is to swap the two innermost levels of the index.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n {extra_params}\n\n Returns\n -------\n {klass}\n {klass} with levels swapped in MultiIndex.\n\n {examples}\n \"\"\"\n assert isinstance(self.index, MultiIndex)\n new_index = self.index.swaplevel(i, j)\n return self._constructor(self._values, index=new_index, copy=copy).__finalize__(\n self, method=\"swaplevel\"\n )\n\n def reorder_levels(self, order) -> Series:\n \"\"\"\n Rearrange index levels using input order.\n\n May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int representing new level order\n Reference level by number or key.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n if not isinstance(self.index, MultiIndex): # pragma: no cover\n raise Exception(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n return result\n\n def explode(self, ignore_index: bool = False) -> Series:\n \"\"\"\n Transform each element of a list-like to a row.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series\n Exploded lists to rows; index will be duplicated for these rows.\n\n See Also\n --------\n Series.str.split : Split string values on specified separator.\n Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex\n to produce DataFrame.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n DataFrame.explode : Explode a DataFrame from list-like\n columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
In addition, the ordering of elements in\n the output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])\n >>> s\n 0 [1, 2, 3]\n 1 foo\n 2 []\n 3 [3, 4]\n dtype: object\n\n >>> s.explode()\n 0 1\n 0 2\n 0 3\n 1 foo\n 2 NaN\n 3 3\n 3 4\n dtype: object\n \"\"\"\n if not len(self) or not is_object_dtype(self):\n result = self.copy()\n return result.reset_index(drop=True) if ignore_index else result\n\n values, counts = reshape.explode(np.asarray(self._values))\n\n if ignore_index:\n index = default_index(len(values))\n else:\n index = self.index.repeat(counts)\n\n return self._constructor(values, index=index, name=self.name)\n\n def unstack(self, level=-1, fill_value=None) -> DataFrame:\n \"\"\"\n Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n fill_value : scalar value, default None\n Value to use when replacing NaN values.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... ['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n return unstack(self, level, fill_value)\n\n # ----------------------------------------------------------------------\n # function application\n\n def map(self, arg, na_action=None) -> Series:\n \"\"\"\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, collections.abc.Mapping subclass or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NaN values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n See Also\n --------\n Series.apply : For applying more complex functions on a Series.\n DataFrame.apply : Apply a function row-/column-wise.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n\n Notes\n -----\n When ``arg`` is a dictionary, values in Series that are not in the\n dictionary (as keys) are converted to ``NaN``. However, if the\n dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.\n provides a method for default values), then this default is used\n rather than ``NaN``.\n\n Examples\n --------\n >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 NaN\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict`` or a ``Series``. Values that are not found\n in the ``dict`` are converted to ``NaN``, unless the dict has a default\n value (e.g. 
``defaultdict``):\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 NaN\n 3 NaN\n dtype: object\n\n It also accepts a function:\n\n >>> s.map('I am a {}'.format)\n 0 I am a cat\n 1 I am a dog\n 2 I am a nan\n 3 I am a rabbit\n dtype: object\n\n To avoid applying the function to missing values (and keep them as\n ``NaN``) ``na_action='ignore'`` can be used:\n\n >>> s.map('I am a {}'.format, na_action='ignore')\n 0 I am a cat\n 1 I am a dog\n 2 NaN\n 3 I am a rabbit\n dtype: object\n \"\"\"\n new_values = self._map_values(arg, na_action=na_action)\n return self._constructor(new_values, index=self.index).__finalize__(\n self, method=\"map\"\n )\n\n def _gotitem(self, key, ndim, subset=None) -> Series:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : {1, 2}\n Requested ndim of result.\n subset : object, default None\n Subset to act on.\n \"\"\"\n return self\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n Series.apply : Invoke function on a Series.\n Series.transform : Transform function producing a Series with like indexes.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.agg('min')\n 1\n\n >>> s.agg(['min', 'max'])\n min 1\n max 4\n dtype: int64\n \"\"\"\n )\n\n @doc(\n generic._shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis=0, *args, **kwargs):\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # if func is None, will switch to user-provided \"named aggregation\" kwargs\n if func is None:\n func = dict(kwargs.items())\n\n op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs)\n result = op.agg()\n return result\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> DataFrame | Series:\n # Validate axis argument\n self._get_axis_number(axis)\n result = SeriesApply(\n self, func=func, convert_dtype=True, args=args, kwargs=kwargs\n ).transform()\n return result\n\n def apply(\n self,\n func: AggFuncType,\n convert_dtype: bool = True,\n args: tuple[Any, ...] = (),\n **kwargs,\n ) -> DataFrame | Series:\n \"\"\"\n Invoke function on values of Series.\n\n Can be ufunc (a NumPy function that applies to the entire Series)\n or a Python function that only works on single values.\n\n Parameters\n ----------\n func : function\n Python function or NumPy ufunc to apply.\n convert_dtype : bool, default True\n Try to find better dtype for elementwise function results. If\n False, leave as dtype=object. 
Note that the dtype is always\n preserved for some extension array dtypes, such as Categorical.\n args : tuple\n Positional arguments passed to func after the series value.\n **kwargs\n Additional keyword arguments passed to func.\n\n Returns\n -------\n Series or DataFrame\n If func returns a Series object the result will be a DataFrame.\n\n See Also\n --------\n Series.map: For element-wise operations.\n Series.agg: Only perform aggregating type operations.\n Series.transform: Only perform transforming type operations.\n\n Notes\n -----\n Functions that mutate the passed object can produce unexpected\n behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\n for more details.\n\n Examples\n --------\n Create a series with typical summer temperatures for each city.\n\n >>> s = pd.Series([20, 21, 12],\n ... index=['London', 'New York', 'Helsinki'])\n >>> s\n London 20\n New York 21\n Helsinki 12\n dtype: int64\n\n Square the values by defining a function and passing it as an\n argument to ``apply()``.\n\n >>> def square(x):\n ... return x ** 2\n >>> s.apply(square)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Square the values by passing an anonymous function as an\n argument to ``apply()``.\n\n >>> s.apply(lambda x: x ** 2)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Define a custom function that needs additional positional\n arguments and pass these additional arguments using the\n ``args`` keyword.\n\n >>> def subtract_custom_value(x, custom_value):\n ... return x - custom_value\n\n >>> s.apply(subtract_custom_value, args=(5,))\n London 15\n New York 16\n Helsinki 7\n dtype: int64\n\n Define a custom function that takes keyword arguments\n and pass these arguments to ``apply``.\n\n >>> def add_custom_values(x, **kwargs):\n ... for month in kwargs:\n ... x += kwargs[month]\n ... 
return x\n\n >>> s.apply(add_custom_values, june=30, july=20, august=25)\n London 95\n New York 96\n Helsinki 87\n dtype: int64\n\n Use a function from the Numpy library.\n\n >>> s.apply(np.log)\n London 2.995732\n New York 3.044522\n Helsinki 2.484907\n dtype: float64\n \"\"\"\n return SeriesApply(self, func, convert_dtype, args, kwargs).apply()\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis=0,\n skipna=True,\n numeric_only=None,\n filter_type=None,\n **kwds,\n ):\n \"\"\"\n Perform a reduction operation.\n\n If we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object.\n \"\"\"\n delegate = self._values\n\n if axis is not None:\n self._get_axis_number(axis)\n\n if isinstance(delegate, ExtensionArray):\n # dispatch to ExtensionArray interface\n return delegate._reduce(name, skipna=skipna, **kwds)\n\n else:\n # dispatch to numpy arrays\n if numeric_only:\n kwd_name = \"numeric_only\"\n if name in [\"any\", \"all\"]:\n kwd_name = \"bool_only\"\n raise NotImplementedError(\n f\"Series.{name} does not implement {kwd_name}.\"\n )\n with np.errstate(all=\"ignore\"):\n return op(delegate, skipna=skipna, **kwds)\n\n def _reindex_indexer(\n self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool\n ) -> Series:\n # Note: new_index is None iff indexer is None\n # if not None, indexer is np.intp\n if indexer is None:\n if copy:\n return self.copy()\n return self\n\n new_values = algorithms.take_nd(\n self._values, indexer, allow_fill=True, fill_value=None\n )\n return self._constructor(new_values, index=new_index)\n\n def _needs_reindex_multi(self, axes, method, level) -> bool:\n \"\"\"\n Check if we do need a multi reindex; this is for compat with\n higher dims.\n \"\"\"\n return False\n\n # error: Cannot determine type of 'align'\n @doc(\n NDFrame.align, # type: ignore[has-type]\n klass=_shared_doc_kwargs[\"klass\"],\n axes_single_arg=_shared_doc_kwargs[\"axes_single_arg\"],\n )\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n def rename(\n self,\n mapper=None,\n *,\n index=None,\n columns=None,\n axis=None,\n copy=True,\n inplace=False,\n level=None,\n errors=\"ignore\",\n ) -> Series | None:\n \"\"\"\n Alter Series index labels or name.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Unused. Accepted for compatibility with DataFrame method only.\n mapper : scalar, hashable sequence, dict-like or function, optional\n Functions or dict-like are transformations to apply to\n the index.\n Scalar or hashable sequence-like will alter the ``Series.name``\n attribute.\n\n **kwargs\n Additional keyword arguments passed to the function. 
Only the\n \"inplace\" keyword is used.\n\n Returns\n -------\n Series or None\n Series with index labels or name altered or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.rename : Corresponding DataFrame method.\n Series.rename_axis : Set the name of the axis.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n \"\"\"\n if axis is not None:\n # Make sure we raise if an invalid 'axis' is passed.\n axis = self._get_axis_number(axis)\n\n if index is not None and mapper is not None:\n raise TypeError(\"Cannot specify both 'mapper' and 'index'\")\n if mapper is None:\n mapper = index\n if callable(mapper) or is_dict_like(mapper):\n return super().rename(\n mapper, copy=copy, inplace=inplace, level=level, errors=errors\n )\n else:\n return self._set_name(mapper, inplace=inplace)\n\n @overload\n def set_axis(\n self, labels, axis: Axis = ..., inplace: Literal[False] = ...\n ) -> Series:\n ...\n\n @overload\n def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:\n ...\n\n @overload\n def set_axis(self, labels, *, inplace: Literal[True]) -> None:\n ...\n\n @overload\n def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> Series | None:\n ...\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"labels\"])\n @Appender(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0)\n a 1\n b 2\n c 3\n dtype: int64\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\"\",\n axis_description_sub=\"\",\n see_also_sub=\"\",\n )\n @Appender(generic.NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n # error: Cannot determine type of 'reindex'\n @doc(\n NDFrame.reindex, # type: ignore[has-type]\n klass=_shared_doc_kwargs[\"klass\"],\n axes=_shared_doc_kwargs[\"axes\"],\n optional_labels=_shared_doc_kwargs[\"optional_labels\"],\n optional_axis=_shared_doc_kwargs[\"optional_axis\"],\n )\n def reindex(self, *args, **kwargs) -> Series:\n if len(args) > 1:\n raise TypeError(\"Only one positional argument ('index') is allowed\")\n if args:\n (index,) = args\n if \"index\" in kwargs:\n raise TypeError(\n \"'index' passed as both positional and keyword argument\"\n )\n kwargs.update({\"index\": index})\n return super().reindex(**kwargs)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"labels\"])\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace=False,\n errors=\"raise\",\n ) -> Series:\n \"\"\"\n Return Series with specified index labels removed.\n\n Remove elements of a Series based on specifying the index labels.\n When using a multi-index, labels on different levels can be removed\n by specifying the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index labels to drop.\n axis : 0, default 0\n Redundant for application on Series.\n index : single label or list-like\n Redundant for application on Series, but 'index' can be used instead\n of 'labels'.\n columns : single label or list-like\n No change is 
made to the Series; use 'index' or 'labels' instead.\n level : int or level name, optional\n For MultiIndex, level for which the labels will be removed.\n inplace : bool, default False\n If True, do operation inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are dropped.\n\n Returns\n -------\n Series or None\n Series with specified index labels removed or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If none of the labels are found in the index.\n\n See Also\n --------\n Series.reindex : Return only specified index labels of Series.\n Series.dropna : Return series without null values.\n Series.drop_duplicates : Return Series with duplicate values removed.\n DataFrame.drop : Drop specified labels from rows or columns.\n\n Examples\n --------\n >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])\n >>> s\n A 0\n B 1\n C 2\n dtype: int64\n\n Drop labels B en C\n\n >>> s.drop(labels=['B', 'C'])\n A 0\n dtype: int64\n\n Drop 2nd level label in MultiIndex Series\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.drop(labels='weight', level=1)\n lama speed 45.0\n length 1.2\n cow speed 30.0\n length 1.5\n falcon speed 320.0\n length 0.3\n dtype: float64\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @overload\n def fillna(\n self,\n value=...,\n method: FillnaOptions | None = ...,\n axis: Axis | None = ...,\n inplace: Literal[False] = ...,\n limit=...,\n downcast=...,\n ) -> Series:\n ...\n\n @overload\n def fillna(\n self,\n value,\n method: FillnaOptions | None,\n axis: Axis | None,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n *,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n value,\n *,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n *,\n method: FillnaOptions | None,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n *,\n axis: Axis | None,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n *,\n method: FillnaOptions | None,\n axis: Axis | None,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n value,\n *,\n axis: Axis | None,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n value,\n method: FillnaOptions | None,\n *,\n inplace: Literal[True],\n limit=...,\n downcast=...,\n ) -> None:\n ...\n\n @overload\n def fillna(\n self,\n value=...,\n method: FillnaOptions | None = ...,\n axis: Axis | None = ...,\n inplace: bool = ...,\n limit=...,\n downcast=...,\n ) -> Series | None:\n ...\n\n # error: Cannot determine type of 'fillna'\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"value\"])\n @doc(NDFrame.fillna, **_shared_doc_kwargs) # type: ignore[has-type]\n 
def fillna(\n self,\n value: object | ArrayLike | None = None,\n method: FillnaOptions | None = None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ) -> Series | None:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Hashable) -> Any:\n \"\"\"\n Return item and drops from series. Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Index of the element that needs to be removed.\n\n Returns\n -------\n Value that is popped from series.\n\n Examples\n --------\n >>> ser = pd.Series([1,2,3])\n\n >>> ser.pop(0)\n 1\n\n >>> ser\n 1 2\n 2 3\n dtype: int64\n \"\"\"\n return super().pop(item=item)\n\n # error: Cannot determine type of 'replace'\n @doc(\n NDFrame.replace, # type: ignore[has-type]\n klass=_shared_doc_kwargs[\"klass\"],\n inplace=_shared_doc_kwargs[\"inplace\"],\n replace_iloc=_shared_doc_kwargs[\"replace_iloc\"],\n )\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n @doc(INFO_DOCSTRING, **series_sub_kwargs)\n def info(\n self,\n verbose: bool | None = None,\n buf: IO[str] | None = None,\n max_cols: int | None = None,\n memory_usage: bool | str | None = None,\n show_counts: bool = True,\n ) -> None:\n return SeriesInfo(self, memory_usage).render(\n buf=buf,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n\n def _replace_single(self, to_replace, method: str, inplace: bool, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n\n result = self if inplace else self.copy()\n\n values = result._values\n mask = missing.mask_missing(values, to_replace)\n\n if isinstance(values, ExtensionArray):\n # dispatch to the EA's _pad_mask_inplace method\n values._fill_mask_inplace(method, limit, mask)\n else:\n fill_f = missing.get_fill_func(method)\n values, _ = fill_f(values, limit=limit, mask=mask)\n\n if inplace:\n return\n return result\n\n # error: Cannot determine type of 'shift'\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"]) # type: ignore[has-type]\n def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def memory_usage(self, index: bool = True, deep: bool = False) -> int:\n \"\"\"\n Return the memory usage of the Series.\n\n The memory usage can optionally include the contribution of\n the index and of elements of `object` dtype.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the Series index.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned value.\n\n Returns\n -------\n int\n Bytes of memory consumed.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n DataFrame.memory_usage : Bytes consumed by a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(range(3))\n >>> s.memory_usage()\n 152\n\n Not including the index gives the size of the rest of the data, which\n is necessarily smaller:\n\n >>> s.memory_usage(index=False)\n 24\n\n The memory footprint of `object` 
values is ignored by default:\n\n >>> s = pd.Series([\"a\", \"b\"])\n >>> s.values\n array(['a', 'b'], dtype=object)\n >>> s.memory_usage()\n 144\n >>> s.memory_usage(deep=True)\n 244\n \"\"\"\n v = self._memory_usage(deep=deep)\n if index:\n v += self.index.memory_usage(deep=deep)\n return v\n\n def isin(self, values) -> Series:\n \"\"\"\n Whether elements in Series are contained in `values`.\n\n Return a boolean Series showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. Instead, turn a single string into a\n list of one element.\n\n Returns\n -------\n Series\n Series of booleans indicating if each element is in values.\n\n Raises\n ------\n TypeError\n * If `values` is a string\n\n See Also\n --------\n DataFrame.isin : Equivalent method on DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n To invert the boolean values, use the ``~`` operator:\n\n >>> ~s.isin(['cow', 'lama'])\n 0 False\n 1 False\n 2 False\n 3 True\n 4 False\n 5 True\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Strings and integers are distinct and are therefore not comparable:\n\n >>> pd.Series([1]).isin(['1'])\n 0 False\n dtype: bool\n >>> pd.Series([1.1]).isin(['1.1'])\n 0 False\n dtype: bool\n \"\"\"\n result = algorithms.isin(self._values, values)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"isin\"\n )\n\n def between(self, left, right, inclusive=\"both\") -> Series:\n \"\"\"\n Return boolean Series equivalent to left <= series <= right.\n\n This function returns a boolean vector containing `True` wherever the\n corresponding Series element is between the boundary values `left` and\n `right`. NA values are treated as `False`.\n\n Parameters\n ----------\n left : scalar or list-like\n Left boundary.\n right : scalar or list-like\n Right boundary.\n inclusive : {\"both\", \"neither\", \"left\", \"right\"}\n Include boundaries. Whether to set each bound as closed or open.\n\n .. 
versionchanged:: 1.3.0\n\n Returns\n -------\n Series\n Series representing whether each element is between left and\n right (inclusive).\n\n See Also\n --------\n Series.gt : Greater than of series and other.\n Series.lt : Less than of series and other.\n\n Notes\n -----\n This function is equivalent to ``(left <= ser) & (ser <= right)``\n\n Examples\n --------\n >>> s = pd.Series([2, 0, 4, 8, np.nan])\n\n Boundary values are included by default:\n\n >>> s.between(1, 4)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n With `inclusive` set to ``\"neither\"`` boundary values are excluded:\n\n >>> s.between(1, 4, inclusive=\"neither\")\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n `left` and `right` can be any scalar value:\n\n >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])\n >>> s.between('Anna', 'Daniel')\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n if inclusive is True or inclusive is False:\n warnings.warn(\n \"Boolean inputs to the `inclusive` argument are deprecated in \"\n \"favour of `both` or `neither`.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if inclusive:\n inclusive = \"both\"\n else:\n inclusive = \"neither\"\n if inclusive == \"both\":\n lmask = self >= left\n rmask = self <= right\n elif inclusive == \"left\":\n lmask = self >= left\n rmask = self < right\n elif inclusive == \"right\":\n lmask = self > left\n rmask = self <= right\n elif inclusive == \"neither\":\n lmask = self > left\n rmask = self < right\n else:\n raise ValueError(\n \"Inclusive has to be either string of 'both',\"\n \"'left', 'right', or 'neither'.\"\n )\n\n return lmask & rmask\n\n # ----------------------------------------------------------------------\n # Convert to types that support pd.NA\n\n def _convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n ) -> Series:\n input_series = self\n if infer_objects:\n input_series = input_series.infer_objects()\n if is_object_dtype(input_series):\n input_series = input_series.copy()\n\n if convert_string or convert_integer or convert_boolean or convert_floating:\n inferred_dtype = convert_dtypes(\n input_series._values,\n convert_string,\n convert_integer,\n convert_boolean,\n convert_floating,\n )\n result = input_series.astype(inferred_dtype)\n else:\n result = input_series.copy()\n return result\n\n # error: Cannot determine type of 'isna'\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"]) # type: ignore[has-type]\n def isna(self) -> Series:\n return generic.NDFrame.isna(self)\n\n # error: Cannot determine type of 'isna'\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"]) # type: ignore[has-type]\n def isnull(self) -> Series:\n return super().isnull()\n\n # error: Cannot determine type of 'notna'\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"]) # type: ignore[has-type]\n def notna(self) -> Series:\n return super().notna()\n\n # error: Cannot determine type of 'notna'\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"]) # type: ignore[has-type]\n def notnull(self) -> Series:\n return super().notnull()\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def dropna(self, axis=0, inplace=False, how=None):\n \"\"\"\n Return a new Series with missing values removed.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n 
Parameters\n ----------\n axis : {0 or 'index'}, default 0\n There is only one axis to drop values from.\n inplace : bool, default False\n If True, do operation inplace and return None.\n how : str, optional\n Not in use. Kept for compatibility.\n\n Returns\n -------\n Series or None\n Series with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n Series.isna: Indicate missing values.\n Series.notna : Indicate existing (non-missing) values.\n Series.fillna : Replace missing values.\n DataFrame.dropna : Drop rows or columns which contain NA values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> ser = pd.Series([1., 2., np.nan])\n >>> ser\n 0 1.0\n 1 2.0\n 2 NaN\n dtype: float64\n\n Drop NA values from a Series.\n\n >>> ser.dropna()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> ser.dropna(inplace=True)\n >>> ser\n 0 1.0\n 1 2.0\n dtype: float64\n\n Empty strings are not considered NA values. ``None`` is considered an\n NA value.\n\n >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])\n >>> ser\n 0 NaN\n 1 2\n 2 NaT\n 3\n 4 None\n 5 I stay\n dtype: object\n >>> ser.dropna()\n 1 2\n 3\n 5 I stay\n dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis or 0)\n\n if self._can_hold_na:\n result = remove_na_arraylike(self)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n else:\n if inplace:\n # do nothing\n pass\n else:\n return self.copy()\n\n # ----------------------------------------------------------------------\n # Time series-oriented methods\n\n # error: Cannot determine type of 'asfreq'\n @doc(NDFrame.asfreq, **_shared_doc_kwargs) # type: ignore[has-type]\n def asfreq(\n self,\n freq,\n method=None,\n how: str | None = None,\n normalize: bool = False,\n fill_value=None,\n ) -> Series:\n return super().asfreq(\n freq=freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n # error: Cannot determine type of 'resample'\n @doc(NDFrame.resample, **_shared_doc_kwargs) # type: ignore[has-type]\n def resample(\n self,\n rule,\n axis=0,\n closed: str | None = None,\n label: str | None = None,\n convention: str = \"start\",\n kind: str | None = None,\n loffset=None,\n base: int | None = None,\n on=None,\n level=None,\n origin: str | TimestampConvertibleTypes = \"start_day\",\n offset: TimedeltaConvertibleTypes | None = None,\n ) -> Resampler:\n return super().resample(\n rule=rule,\n axis=axis,\n closed=closed,\n label=label,\n convention=convention,\n kind=kind,\n loffset=loffset,\n base=base,\n on=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def to_timestamp(self, freq=None, how=\"start\", copy=True) -> Series:\n \"\"\"\n Cast to DatetimeIndex of Timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. 
end.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series with DatetimeIndex\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_timestamp(freq=freq, how=how)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_timestamp\"\n )\n\n def to_period(self, freq=None, copy=True) -> Series:\n \"\"\"\n Convert Series from DatetimeIndex to PeriodIndex.\n\n Parameters\n ----------\n freq : str, default None\n Frequency associated with the PeriodIndex.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series\n Series with index converted to PeriodIndex.\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_period(freq=freq)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_period\"\n )\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def ffill(\n self: Series,\n axis: None | Axis = None,\n inplace: bool = False,\n limit: None | int = None,\n downcast=None,\n ) -> Series | None:\n return super().ffill(axis, inplace, limit, downcast)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def bfill(\n self: Series,\n axis: None | Axis = None,\n inplace: bool = False,\n limit: None | int = None,\n downcast=None,\n ) -> Series | None:\n return super().bfill(axis, inplace, limit, downcast)\n\n @deprecate_nonkeyword_arguments(\n version=None, allowed_args=[\"self\", \"lower\", \"upper\"]\n )\n def clip(\n self: Series,\n lower=None,\n upper=None,\n axis: Axis | None = None,\n inplace: bool = False,\n *args,\n **kwargs,\n ) -> Series | None:\n return super().clip(lower, upper, axis, inplace, *args, **kwargs)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"method\"])\n def interpolate(\n self: Series,\n method: str = \"linear\",\n axis: Axis = 0,\n limit: int | None = None,\n inplace: bool = False,\n limit_direction: str | None = None,\n limit_area: str | None = None,\n downcast: str | None = None,\n **kwargs,\n ) -> Series | None:\n return super().interpolate(\n method,\n axis,\n limit,\n inplace,\n limit_direction,\n limit_area,\n downcast,\n **kwargs,\n )\n\n @deprecate_nonkeyword_arguments(\n version=None, allowed_args=[\"self\", \"cond\", \"other\"]\n )\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=lib.no_default,\n try_cast=lib.no_default,\n ):\n return super().where(cond, other, inplace, axis, level, errors, try_cast)\n\n @deprecate_nonkeyword_arguments(\n version=None, allowed_args=[\"self\", \"cond\", \"other\"]\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=lib.no_default,\n try_cast=lib.no_default,\n ):\n return super().mask(cond, other, inplace, axis, level, errors, try_cast)\n\n # ----------------------------------------------------------------------\n # Add index\n _AXIS_ORDERS = [\"index\"]\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 0\n _info_axis_name = \"index\"\n\n index: Index = properties.AxisProperty(\n axis=0, doc=\"The index (axis labels) of the Series.\"\n )\n\n # 
----------------------------------------------------------------------\n # Accessor Methods\n # ----------------------------------------------------------------------\n str = CachedAccessor(\"str\", StringMethods)\n dt = CachedAccessor(\"dt\", CombinedDatetimelikeProperties)\n cat = CachedAccessor(\"cat\", CategoricalAccessor)\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n sparse = CachedAccessor(\"sparse\", SparseAccessor)\n\n # ----------------------------------------------------------------------\n # Add plotting methods to Series\n hist = pandas.plotting.hist_series\n\n # ----------------------------------------------------------------------\n # Template-Based Arithmetic/Comparison Methods\n\n def _cmp_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n\n if isinstance(other, Series) and not self._indexed_same(other):\n raise ValueError(\"Can only compare identically-labeled Series objects\")\n\n lvalues = self._values\n rvalues = extract_array(other, extract_numpy=True, extract_range=True)\n\n with np.errstate(all=\"ignore\"):\n res_values = ops.comparison_op(lvalues, rvalues, op)\n\n return self._construct_result(res_values, name=res_name)\n\n def _logical_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n self, other = ops.align_method_SERIES(self, other, align_asobject=True)\n\n lvalues = self._values\n rvalues = extract_array(other, extract_numpy=True, extract_range=True)\n\n res_values = ops.logical_op(lvalues, rvalues, op)\n return self._construct_result(res_values, name=res_name)\n\n def _arith_method(self, other, op):\n self, other = ops.align_method_SERIES(self, other)\n return base.IndexOpsMixin._arith_method(self, other, op)\n\n\nSeries._add_numeric_operations()\n\n# Add arithmetic!\nops.add_flex_arithmetic_methods(Series)\n" ]
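Illustrative usage sketch (not part of the dump): the pandas Series source listed above documents nlargest, explode and between, and the short snippet below simply exercises those methods on made-up data, with the expected behaviour taken from the docstrings embedded in that source.

# Minimal usage sketch for a few Series methods defined in the source above.
# Sample data is invented for illustration; behaviour follows the docstrings.
import numpy as np
import pandas as pd

population = pd.Series({"France": 65_000_000, "Italy": 59_000_000,
                        "Malta": 434_000, "Maldives": 434_000,
                        "Brunei": 434_000})

# keep='all' retains every duplicate of the smallest kept value, so the
# result may be longer than n (five rows here instead of three).
print(population.nlargest(3, keep="all"))

# explode() turns each element of a list-like into its own row, repeating
# the index; scalars pass through unchanged and empty lists become NaN.
print(pd.Series([[1, 2, 3], "foo", [], [3, 4]]).explode())

# between() is equivalent to (left <= ser) & (ser <= right) by default,
# and NA values are treated as False.
print(pd.Series([2, 0, 4, 8, np.nan]).between(1, 4))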
[ [ "pandas.core.ops.logical_op", "pandas.core.nanops.nancov", "pandas.core.dtypes.cast.maybe_cast_pointwise_result", "pandas.util._validators.validate_bool_kwarg", "pandas.core.ops.align_method_SERIES", "pandas.core.dtypes.inference.is_hashable", "pandas.util._decorators.deprecate_nonkeyword_arguments", "pandas.core.common.standardize_mapping", "pandas.core.construction.is_empty_data", "pandas.core.indexes.api.default_index", "pandas.core.dtypes.common.is_iterator", "pandas.core.dtypes.common.is_list_like", "pandas.core.common.require_length_match", "pandas.core.apply.SeriesApply", "pandas.core.base.IndexOpsMixin._arith_method", "numpy.array", "pandas.core.ops.comparison_op", "pandas.core.ops.fill_binop", "pandas.core.groupby.generic.SeriesGroupBy", "pandas.core.internals.SingleArrayManager.from_array", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas.core.dtypes.missing.isna", "pandas.compat.numpy.function.validate_repeat", "pandas.core.common.maybe_iterable_to_list", "pandas.core.generic.NDFrame.__init__", "pandas.core.sorting.ensure_key_mapped", "pandas.core.dtypes.cast.maybe_box_native", "pandas.io.formats.format.SeriesFormatter", "numpy.asarray", "pandas.compat.numpy.function.validate_round", "pandas._config.get_option", "pandas.core.indexers.unpack_1tuple", "pandas.core.generic.NDFrame.isna", "pandas.core.internals.SingleBlockManager.from_array", "pandas.compat.numpy.function.validate_take", "pandas.core.common.asarray_tuplesafe", "pandas.core.nanops.nancorr", "pandas.core.common.any_none", "pandas.core.algorithms.take_nd", "pandas.core.construction.create_series_with_explicit_dtype", "pandas.core.algorithms.SelectNSeries", "pandas.core.indexes.api.Index", "numpy.errstate", "pandas.core.dtypes.cast.convert_dtypes", "pandas.core.algorithms.mode", "pandas.core.dtypes.common.is_integer", "pandas._libs.lib.infer_dtype", "pandas.util._decorators.doc", "pandas.core.indexes.api.DatetimeIndex", "pandas.core.dtypes.missing.notna", "pandas.core.indexes.api.Float64Index", "pandas.io.formats.info.SeriesInfo", "pandas.util._decorators.Substitution", "pandas.core.dtypes.common.is_dict_like", "pandas.util._decorators.Appender", "pandas.io.formats.format.get_series_repr_params", "pandas.core.dtypes.cast.validate_numeric_casting", "pandas.core.dtypes.common.pandas_dtype", "numpy.transpose", "pandas.core.missing.mask_missing", "numpy.argsort", "pandas._libs.properties.AxisProperty", "pandas.core.missing.get_fill_func", "pandas.core.ops.add_flex_arithmetic_methods", "pandas.core.algorithms.isin", "pandas.core.common.is_bool_indexer", "pandas.core.algorithms.diff", "pandas.core.dtypes.common.is_scalar", "pandas.util._validators.validate_ascending", "pandas.core.accessor.CachedAccessor", "pandas.core.indexes.api.ensure_index", "pandas.util._validators.validate_percentile", "pandas.core.base.IndexOpsMixin.searchsorted", "numpy.dot", "pandas.core.dtypes.missing.remove_na_arraylike", "pandas.core.indexing.check_bool_indexer", "pandas.core.ops.get_op_result_name", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.reshape.concat.concat", "pandas.core.tools.datetimes.to_datetime", "pandas.core.reshape.reshape.unstack", "pandas.util._exceptions.find_stack_level", "pandas.core.construction.sanitize_array", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.common.apply_if_callable", "pandas._libs.lib.maybe_convert_objects", "pandas.core.construction.extract_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.0", "1.3" ], "scipy": [], "tensorflow": [] } ]
jameschapman19/cca_zoo
[ "45c38f0164a324e8fcc33a480814842e747d86c3", "45c38f0164a324e8fcc33a480814842e747d86c3" ]
[ "cca_zoo/models/cca_base.py", "cca_zoo/models/innerloop.py" ]
[ "import itertools\nfrom abc import abstractmethod\nfrom typing import Union, Iterable\n\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom sklearn.base import BaseEstimator, MultiOutputMixin, RegressorMixin\nfrom sklearn.utils.sparsefuncs import mean_variance_axis\nfrom sklearn.utils.validation import check_random_state, check_is_fitted\n\nfrom cca_zoo.utils import check_views, plot_latent_train_test\n\n\nclass _CCA_Base(BaseEstimator, MultiOutputMixin, RegressorMixin):\n \"\"\"\n A class used as the base for methods in the package. Allows methods to inherit fit_transform, predict_corr,\n and gridsearch_fit when only fit (and transform where it is different to the default) is provided.\n\n Attributes\n ----------\n weights : list of weights for each view\n\n \"\"\"\n\n def __init__(\n self,\n latent_dims: int = 1,\n scale=True,\n centre=True,\n copy_data=True,\n accept_sparse=False,\n random_state: Union[int, np.random.RandomState] = None,\n ):\n \"\"\"\n Constructor for _CCA_Base\n\n :param latent_dims: number of latent dimensions to fit\n :param scale: normalize variance in each column before fitting\n :param centre: demean data by column before fitting (and before transforming out of sample\n :param copy_data: If True, X will be copied; else, it may be overwritten\n :param accept_sparse: Whether model can take sparse data as input\n :param random_state: Pass for reproducible output across multiple function calls\n \"\"\"\n self.latent_dims = latent_dims\n self.scale = scale\n self.centre = centre\n self.copy_data = copy_data\n self.accept_sparse = accept_sparse\n self.random_state = check_random_state(random_state)\n self.n_views = None\n\n @abstractmethod\n def fit(self, views: Iterable[np.ndarray], y=None, **kwargs):\n \"\"\"\n Fits a given model\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n \"\"\"\n raise NotImplementedError\n\n def transform(self, views: Iterable[np.ndarray], y=None, **kwargs):\n \"\"\"\n Transforms data given a fit model\n\n :param views: numpy arrays with the same number of rows (samples) separated by commas\n :param kwargs: any additional keyword arguments required by the given model\n \"\"\"\n check_is_fitted(self, attributes=[\"weights\"])\n views = check_views(\n *views, copy=self.copy_data, accept_sparse=self.accept_sparse\n )\n views = self._centre_scale_transform(views)\n transformed_views = []\n for i, (view) in enumerate(views):\n transformed_view = view @ self.weights[i]\n transformed_views.append(transformed_view)\n return transformed_views\n\n def fit_transform(self, views: Iterable[np.ndarray], y=None, **kwargs):\n \"\"\"\n Fits and then transforms the training data\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n :param kwargs: any additional keyword arguments required by the given model\n \"\"\"\n return self.fit(views, **kwargs).transform(views)\n\n def get_loadings(self, views: Iterable[np.ndarray], y=None, **kwargs):\n \"\"\"\n Returns the model loadings for each view for the given data\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n :param kwargs: any additional keyword arguments required by the given model\n \"\"\"\n transformed_views = self.transform(views, **kwargs)\n views = self._centre_scale_transform(views)\n loadings = [\n view.T @ transformed_view\n for view, transformed_view in zip(views, transformed_views)\n ]\n return loadings\n\n def correlations(self, views: 
Iterable[np.ndarray], y=None, **kwargs):\n \"\"\"\n Predicts the correlation for the given data using the fit model\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n :param kwargs: any additional keyword arguments required by the given model\n :return: all_corrs: an array of the pairwise correlations (k,k,self.latent_dims) where k is the number of views\n :rtype: np.ndarray\n \"\"\"\n transformed_views = self.transform(views, **kwargs)\n all_corrs = []\n for x, y in itertools.product(transformed_views, repeat=2):\n all_corrs.append(\n np.diag(np.corrcoef(x.T, y.T)[: self.latent_dims, self.latent_dims:])\n )\n all_corrs = np.array(all_corrs).reshape(\n (len(views), len(views), self.latent_dims)\n )\n return all_corrs\n\n def plot_latent(\n self,\n views: Iterable[np.ndarray],\n test_views: Iterable[np.ndarray] = None,\n title=\"\",\n ):\n scores = self.transform(views)\n if test_views is not None:\n test_scores = self.transform(test_views)\n else:\n test_scores = None\n plot_latent_train_test(scores, test_scores, title=title)\n\n def score(self, views: Iterable[np.ndarray], y=None, **kwargs):\n # by default return the average pairwise correlation in each dimension (for 2 views just the correlation)\n pair_corrs = self.correlations(views, **kwargs)\n # n views\n n_views = pair_corrs.shape[0]\n # sum all the pairwise correlations for each dimension. Subtract the self correlations. Divide by the number of views. Gives average correlation\n dim_corrs = (\n pair_corrs.sum(axis=tuple(range(pair_corrs.ndim - 1))) - n_views\n ) / (n_views ** 2 - n_views)\n return dim_corrs\n\n def _centre_scale(self, views: Iterable[np.ndarray]):\n \"\"\"\n Removes the mean of the training data and standardizes for each view and stores mean and standard deviation during training\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n :return: train_views: the demeaned numpy arrays to be used to fit the model\n \"\"\"\n\n self.view_means = []\n self.view_stds = []\n transformed_views = []\n for view in views:\n if issparse(view):\n view_mean, view_std = mean_variance_axis(view, axis=0)\n self.view_means.append(view_mean)\n self.view_stds.append(view_std)\n view = view - self.view_means[-1]\n view = view / self.view_stds[-1]\n else:\n if self.centre:\n view_mean = view.mean(axis=0)\n self.view_means.append(view_mean)\n view = view - self.view_means[-1]\n if self.scale:\n view_std = view.std(axis=0, ddof=1)\n view_std[view_std == 0.0] = 1.0\n self.view_stds.append(view_std)\n view = view / self.view_stds[-1]\n transformed_views.append(view)\n return transformed_views\n\n def _centre_scale_transform(self, views: Iterable[np.ndarray]):\n \"\"\"\n Removes the mean and standardizes each view based on the mean and standard deviation of the training data\n\n :param views: list/tuple of numpy arrays or array likes with the same number of rows (samples)\n \"\"\"\n if self.centre:\n views = [view - mean for view, mean in zip(views, self.view_means)]\n if self.scale:\n views = [view / std for view, std in zip(views, self.view_stds)]\n return views\n", "import warnings\nfrom abc import abstractmethod\nfrom itertools import combinations\n\nimport numpy as np\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import (\n ElasticNet,\n Lasso,\n LinearRegression,\n Ridge,\n SGDRegressor,\n)\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils.validation import check_random_state\n\nfrom 
..utils.check_values import (\n _check_converged_weights,\n _check_Parikh2014,\n _process_parameter,\n)\n\n\nclass _InnerLoop:\n def __init__(\n self,\n max_iter: int = 100,\n tol: float = 1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n random_state=None,\n ):\n \"\"\"\n :param max_iter: maximum number of iterations to perform if tol is not reached\n :param tol: tolerance value used for stopping criteria\n :param generalized: use an auxiliary variable to\n :param initialization: initialise the optimisation with either the 'unregularized' (CCA/PLS) solution, or a 'random' initialisation\n \"\"\"\n self.generalized = generalized\n self.initialization = initialization\n self.max_iter = max_iter\n self.tol = tol\n self.random_state = check_random_state(random_state)\n\n def _check_params(self):\n \"\"\"\n Put any parameter checks using exceptions inside this function.\n \"\"\"\n pass\n\n def _initialize(self):\n if self.initialization == \"random\":\n self.scores = np.array(\n [self.random_state.randn(view.shape[0], 1) for view in self.views]\n )\n elif self.initialization == \"uniform\":\n self.scores = np.array([np.ones((view.shape[0], 1)) for view in self.views])\n elif self.initialization == \"unregularized\":\n self.scores = (\n PLSInnerLoop(\n initialization=\"random\",\n random_state=self.random_state,\n tol=self.tol,\n )\n ._fit(*self.views)\n .scores\n )\n else:\n raise ValueError(\"initialize must be random, uniform or unregularized\")\n self.scores = (\n self.scores\n * np.sqrt(self.n - 1)\n / np.linalg.norm(self.scores, axis=1)[:, np.newaxis]\n )\n self.weights = [\n self.random_state.randn(view.shape[1], 1) for view in self.views\n ]\n\n def _fit(self, *views: np.ndarray):\n self.views = views\n self.n = views[0].shape[0]\n if len(self.views) > 2:\n self.generalized = True\n warnings.warn(\"For more than 2 views require generalized=True\")\n\n # Check that the parameters that have been passed are valid for these views given #views and #features\n self._check_params()\n self._initialize()\n\n self.track = {}\n # Iterate until convergence\n self.track[\"objective\"] = []\n for _ in range(self.max_iter):\n self._inner_iteration()\n self.track[\"objective\"].append(self._objective())\n if _ > 0 and self._early_stop():\n break\n self.old_scores = self.scores.copy()\n return self\n\n def _early_stop(self) -> bool:\n return False\n\n @abstractmethod\n def _inner_iteration(self):\n pass\n\n def _objective(self) -> int:\n \"\"\"\n Function used to calculate the objective function for the given. If we do not override then returns the covariance\n between projections\n\n :return:\n \"\"\"\n # default objective is correlation\n obj = 0\n for (score_i, score_j) in combinations(self.scores, 2):\n obj += score_i.T @ score_j\n return obj.item()\n\n\nclass PLSInnerLoop(_InnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n random_state=None,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n\n def _check_params(self):\n self.l1_ratio = [0] * len(self.views)\n self.c = [0] * len(self.views)\n\n def _inner_iteration(self):\n # Update each view using loop update function\n for i, view in enumerate(self.views):\n self._update_view(i)\n\n @abstractmethod\n def _update_view(self, view_index: int):\n \"\"\"\n Function used to update the parameters in each view within the loop. 
By changing this function, we can change\n the optimisation. This method NEEDS to update self.scores[view_index]\n\n :param view_index: index of view being updated\n :return: self with updated weights\n \"\"\"\n # mask off the current view and sum the rest\n targets = np.ma.array(self.scores, mask=False)\n targets.mask[view_index] = True\n self.weights[view_index] = (\n self.views[view_index].T @ targets.sum(axis=0).filled()\n )\n self.weights[view_index] /= np.linalg.norm(self.weights[view_index])\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n def _early_stop(self) -> bool:\n # Some kind of early stopping\n if all(\n _cosine_similarity(self.scores[n], self.old_scores[n]) > (1 - self.tol)\n for n, view in enumerate(self.scores)\n ):\n return True\n else:\n return False\n\n\nclass PMDInnerLoop(PLSInnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n c=None,\n positive=None,\n random_state=None,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.c = c\n self.positive = positive\n\n def _check_params(self):\n if self.c is None:\n warnings.warn(\n \"c parameter not set. Setting to c=1 i.e. maximum regularisation of l1 norm\"\n )\n self.c = _process_parameter(\"c\", self.c, 1, len(self.views))\n if any(c < 1 for c in self.c):\n raise ValueError(\n \"All regulariation parameters should be at least \" f\"1. c=[{self.c}]\"\n )\n shape_sqrts = [np.sqrt(view.shape[1]) for view in self.views]\n if any(c > shape_sqrt for c, shape_sqrt in zip(self.c, shape_sqrts)):\n raise ValueError(\n \"All regulariation parameters should be less than\"\n \" the square root of number of the respective\"\n f\" view. c=[{self.c}], limit of each view: \"\n f\"{shape_sqrts}\"\n )\n self.positive = _process_parameter(\n \"positive\", self.positive, False, len(self.views)\n )\n\n def _update_view(self, view_index: int):\n \"\"\"\n :param view_index: index of view being updated\n :return: updated weights\n \"\"\"\n # mask off the current view and sum the rest\n targets = np.ma.array(self.scores, mask=False)\n targets.mask[view_index] = True\n self.weights[view_index] = (\n self.views[view_index].T @ targets.sum(axis=0).filled()\n )\n self.weights[view_index] = _delta_search(\n self.weights[view_index],\n self.c[view_index],\n positive=self.positive[view_index],\n )\n _check_converged_weights(self.weights[view_index], view_index)\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n\nclass ParkhomenkoInnerLoop(PLSInnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n c=None,\n random_state=None,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.c = c\n\n def _check_params(self):\n self.c = _process_parameter(\"c\", self.c, [0.0001], len(self.views))\n if any(c <= 0 for c in self.c):\n raise (\"All regularisation parameters should be above 0. 
\" f\"c=[{self.c}]\")\n\n def _update_view(self, view_index: int):\n \"\"\"\n :param view_index: index of view being updated\n :return: updated weights\n \"\"\"\n # mask off the current view and sum the rest\n targets = np.ma.array(self.scores, mask=False)\n targets.mask[view_index] = True\n w = self.views[view_index].T @ targets.sum(axis=0).filled()\n _check_converged_weights(w, view_index)\n w /= np.linalg.norm(w)\n w = _soft_threshold(w, self.c[view_index] / 2)\n _check_converged_weights(w, view_index)\n self.weights[view_index] = w / np.linalg.norm(w)\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n\nclass ElasticInnerLoop(PLSInnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n c=None,\n l1_ratio=None,\n constrained=False,\n stochastic=True,\n positive=None,\n random_state=None,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.stochastic = stochastic\n self.constrained = constrained\n self.c = c\n self.l1_ratio = l1_ratio\n self.positive = positive\n\n def _check_params(self):\n self.c = _process_parameter(\"c\", self.c, 0, len(self.views))\n self.l1_ratio = _process_parameter(\n \"l1_ratio\", self.l1_ratio, 0, len(self.views)\n )\n self.positive = _process_parameter(\n \"positive\", self.positive, False, len(self.views)\n )\n if self.constrained:\n self.gamma = np.zeros(len(self.views))\n self.regressors = []\n for alpha, l1_ratio, positive in zip(self.c, self.l1_ratio, self.positive):\n if self.stochastic:\n if l1_ratio == 0:\n self.regressors.append(\n SGDRegressor(\n penalty=\"l2\",\n alpha=alpha / len(self.views),\n fit_intercept=False,\n tol=self.tol,\n warm_start=True,\n random_state=self.random_state,\n )\n )\n elif l1_ratio == 1:\n self.regressors.append(\n SGDRegressor(\n penalty=\"l1\",\n alpha=alpha / len(self.views),\n fit_intercept=False,\n tol=self.tol,\n warm_start=True,\n random_state=self.random_state,\n )\n )\n else:\n self.regressors.append(\n SGDRegressor(\n penalty=\"elasticnet\",\n alpha=alpha / len(self.views),\n l1_ratio=l1_ratio,\n fit_intercept=False,\n tol=self.tol,\n warm_start=True,\n random_state=self.random_state,\n )\n )\n else:\n if alpha == 0:\n self.regressors.append(LinearRegression(fit_intercept=False))\n elif l1_ratio == 0:\n if positive:\n self.regressors.append(\n ElasticNet(\n alpha=alpha / len(self.views),\n l1_ratio=0,\n fit_intercept=False,\n warm_start=True,\n positive=positive,\n random_state=self.random_state,\n )\n )\n else:\n self.regressors.append(\n Ridge(alpha=alpha / len(self.views), fit_intercept=False)\n )\n elif l1_ratio == 1:\n self.regressors.append(\n Lasso(\n alpha=alpha / len(self.views),\n fit_intercept=False,\n warm_start=True,\n positive=positive,\n random_state=self.random_state,\n )\n )\n else:\n self.regressors.append(\n ElasticNet(\n alpha=alpha / len(self.views),\n l1_ratio=l1_ratio,\n fit_intercept=False,\n warm_start=True,\n positive=positive,\n random_state=self.random_state,\n )\n )\n\n def _update_view(self, view_index: int):\n \"\"\"\n :param view_index: index of view being updated\n :return: updated weights\n \"\"\"\n if self.generalized:\n target = self.scores.mean(axis=0)\n else:\n target = self.scores[view_index - 1]\n if self.constrained:\n self._elastic_solver_constrained(self.views[view_index], target, view_index)\n else:\n self._elastic_solver(self.views[view_index], target, 
view_index)\n _check_converged_weights(self.weights[view_index], view_index)\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n @ignore_warnings(category=ConvergenceWarning)\n def _elastic_solver(self, X, y, view_index):\n self.weights[view_index] = np.expand_dims(\n self.regressors[view_index].fit(X, y.ravel()).coef_, 1\n )\n self.weights[view_index] /= np.linalg.norm(\n self.views[view_index] @ self.weights[view_index]\n ) / np.sqrt(self.n)\n\n @ignore_warnings(category=ConvergenceWarning)\n def _elastic_solver_constrained(self, X, y, view_index):\n converged = False\n min_ = -1\n max_ = 1\n previous = self.gamma[view_index]\n previous_val = None\n i = 0\n while not converged:\n i += 1\n coef = (\n self.regressors[view_index]\n .fit(\n np.sqrt(self.gamma[view_index] + 1) * X,\n y.ravel() / np.sqrt(self.gamma[view_index] + 1),\n )\n .coef_\n )\n current_val = 1 - (np.linalg.norm(X @ coef) ** 2) / self.n\n self.gamma[view_index], previous, min_, max_ = _bin_search(\n self.gamma[view_index], previous, current_val, previous_val, min_, max_\n )\n previous_val = current_val\n if np.abs(current_val) < 1e-5:\n converged = True\n elif np.abs(max_ - min_) < 1e-30 or i == 50:\n converged = True\n self.weights[view_index] = coef\n\n def _objective(self):\n views = len(self.views)\n c = np.array(self.c)\n ratio = np.array(self.l1_ratio)\n l1 = c * ratio\n l2 = c * (1 - ratio)\n total_objective = 0\n for i in range(views):\n # TODO this looks like it could be tidied up. In particular can we make the generalized objective correspond to the 2 view\n target = self.scores.mean(axis=0)\n objective = (\n views\n * np.linalg.norm(self.views[i] @ self.weights[i] - target) ** 2\n / (2 * self.n)\n )\n l1_pen = l1[i] * np.linalg.norm(self.weights[i], ord=1)\n l2_pen = l2[i] * np.linalg.norm(self.weights[i], ord=2)\n total_objective += objective + l1_pen + l2_pen\n return total_objective\n\n def _early_stop(self) -> bool:\n # Some kind of early stopping\n if np.abs(self.track['objective'][-2] - self.track['objective'][-1]) < self.tol:\n return True\n else:\n return False\n\n\nclass ADMMInnerLoop(ElasticInnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n mu=None,\n lam=None,\n c=None,\n eta=None,\n random_state=None,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.c = c\n self.lam = lam\n self.mu = mu\n self.eta = eta\n\n def _check_params(self):\n self.c = _process_parameter(\"c\", self.c, 0, len(self.views))\n self.lam = _process_parameter(\"lam\", self.lam, 1, len(self.views))\n if self.mu is None:\n self.mu = [\n lam / np.linalg.norm(view) ** 2\n for lam, view in zip(self.lam, self.views)\n ]\n else:\n self.mu = _process_parameter(\"mu\", self.mu, 0, len(self.views))\n self.eta = _process_parameter(\"eta\", self.eta, 0, len(self.views))\n\n if any(mu <= 0 for mu in self.mu):\n raise ValueError(\"At least one mu is less than zero.\")\n\n _check_Parikh2014(self.mu, self.lam, self.views)\n\n self.eta = [\n np.ones((view.shape[0], 1)) * eta for view, eta in zip(self.views, self.eta)\n ]\n self.z = [np.zeros((view.shape[0], 1)) for view in self.views]\n self.l1_ratio = [1] * len(self.views)\n\n def _update_view(self, view_index: int):\n targets = np.ma.array(self.scores, mask=False)\n targets.mask[view_index] = True\n # Suo uses parameter tau whereas we use parameter c to penalize the 
1-norm of the weights.\n # Suo uses c to refer to the gradient where we now use gradient\n gradient = self.views[view_index].T @ targets.sum(axis=0).filled()\n # reset eta each loop?\n # self.eta[view_index][:] = 0\n mu = self.mu[view_index]\n lam = self.lam[view_index]\n N = self.views[view_index].shape[0]\n unnorm_z = []\n norm_eta = []\n norm_weights = []\n norm_proj = []\n for _ in range(self.max_iter):\n # We multiply 'c' by N in order to make regularisation match across the different sparse cca methods\n self.weights[view_index] = self._prox_mu_f(\n self.weights[view_index]\n - mu\n / lam\n * self.views[view_index].T\n @ (\n self.views[view_index] @ self.weights[view_index]\n - self.z[view_index]\n + self.eta[view_index]\n ),\n mu,\n gradient,\n N * self.c[view_index],\n )\n unnorm_z.append(\n np.linalg.norm(\n self.views[view_index] @ self.weights[view_index]\n + self.eta[view_index]\n )\n )\n self.z[view_index] = self._prox_lam_g(\n self.views[view_index] @ self.weights[view_index] + self.eta[view_index]\n )\n self.eta[view_index] = (\n self.eta[view_index]\n + self.views[view_index] @ self.weights[view_index]\n - self.z[view_index]\n )\n norm_eta.append(np.linalg.norm(self.eta[view_index]))\n norm_proj.append(\n np.linalg.norm(self.views[view_index] @ self.weights[view_index])\n )\n norm_weights.append(np.linalg.norm(self.weights[view_index], 1))\n _check_converged_weights(self.weights[view_index], view_index)\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n def _prox_mu_f(self, x, mu, c, tau):\n u_update = x.copy()\n mask_1 = x + (mu * c) > mu * tau\n # if mask_1.sum()>0:\n u_update[mask_1] = x[mask_1] + mu * (c[mask_1] - tau)\n mask_2 = x + (mu * c) < -mu * tau\n # if mask_2.sum() > 0:\n u_update[mask_2] = x[mask_2] + mu * (c[mask_2] + tau)\n mask_3 = ~(mask_1 | mask_2)\n u_update[mask_3] = 0\n return u_update\n\n def _prox_lam_g(self, x):\n norm = np.linalg.norm(x)\n if norm < 1:\n return x\n else:\n return x / max(1, norm)\n\n\nclass SpanCCAInnerLoop(_InnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-5,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n c=None,\n regularisation=\"l0\",\n rank=1,\n random_state=None,\n positive=False,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.c = c\n self.regularisation = regularisation\n self.rank = rank\n self.positive = positive\n\n def _check_params(self):\n \"\"\"check number of views=2\"\"\"\n if len(self.views) != 2:\n raise ValueError(f\"SpanCCA requires only 2 views\")\n cov = self.views[0].T @ self.views[1] / self.n\n # Perform SVD on im and obtain individual matrices\n P, D, Q = np.linalg.svd(cov, full_matrices=True)\n self.P = P[:, : self.rank]\n self.D = D[: self.rank]\n self.Q = Q[: self.rank, :].T\n self.max_obj = 0\n if self.regularisation == \"l0\":\n self.update = _support_soft_thresh\n self.c = _process_parameter(\"c\", self.c, 0, len(self.views))\n elif self.regularisation == \"l1\":\n self.update = _delta_search\n self.c = _process_parameter(\"c\", self.c, 0, len(self.views))\n self.positive = _process_parameter(\n \"positive\", self.positive, False, len(self.views)\n )\n\n def _inner_iteration(self):\n c = self.random_state.randn(self.rank, 1)\n c /= np.linalg.norm(c)\n a = self.P @ np.diag(self.D) @ c\n u = self.update(a, self.c[0])\n u /= np.linalg.norm(u)\n b = self.Q @ np.diag(self.D) @ self.P.T @ u\n v = self.update(b, self.c[1])\n 
v /= np.linalg.norm(v)\n if b.T @ v > self.max_obj:\n self.max_obj = b.T @ v\n self.scores[0] = self.views[0] @ u\n self.scores[1] = self.views[1] @ v\n self.weights[0] = u\n self.weights[1] = v\n\n\nclass SWCCAInnerLoop(PLSInnerLoop):\n def __init__(\n self,\n max_iter: int = 100,\n tol=1e-20,\n generalized: bool = False,\n initialization: str = \"unregularized\",\n regularisation=\"l0\",\n c=None,\n sample_support: int = None,\n random_state=None,\n positive=False,\n ):\n super().__init__(\n max_iter=max_iter,\n tol=tol,\n generalized=generalized,\n initialization=initialization,\n random_state=random_state,\n )\n self.c = c\n self.sample_support = sample_support\n if regularisation == \"l0\":\n self.update = _support_soft_thresh\n elif regularisation == \"l1\":\n self.update = _delta_search\n self.positive = positive\n\n def _check_params(self):\n self.sample_weights = np.ones((self.views[0].shape[0], 1))\n self.sample_weights /= np.linalg.norm(self.sample_weights)\n self.c = _process_parameter(\"c\", self.c, 1, len(self.views))\n self.positive = _process_parameter(\n \"positive\", self.positive, False, len(self.views)\n )\n\n def _update_view(self, view_index: int):\n \"\"\"\n :param view_index: index of view being updated\n :return: updated weights\n \"\"\"\n targets = np.ma.array(self.scores, mask=False)\n targets.mask[view_index] = True\n self.weights[view_index] = (\n self.views[view_index] * self.sample_weights\n ).T @ targets.sum(axis=0).filled()\n self.weights[view_index] = self.update(\n self.weights[view_index],\n self.c[view_index],\n positive=self.positive[view_index],\n )\n self.weights[view_index] /= np.linalg.norm(self.weights[view_index])\n if view_index == len(self.views) - 1:\n self._update_sample_weights()\n self.scores[view_index] = self.views[view_index] @ self.weights[view_index]\n\n def _update_sample_weights(self):\n w = self.scores.prod(axis=0)\n self.sample_weights = _support_soft_thresh(w, self.sample_support)\n self.sample_weights /= np.linalg.norm(self.sample_weights)\n self.track[\"sample_weights\"] = self.sample_weights\n\n def _early_stop(self) -> bool:\n return False\n\n def _objective(self) -> int:\n \"\"\"\n Function used to calculate the objective function for the given. 
If we do not override then returns the covariance\n between projections\n\n :return:\n \"\"\"\n # default objective is correlation\n obj = 0\n for (score_i, score_j) in combinations(self.scores, 2):\n obj += (score_i * self.sample_weights).T @ score_j\n return obj\n\n\ndef _bin_search(current, previous, current_val, previous_val, min_, max_):\n \"\"\"Binary search helper function:\n current:current parameter value\n previous:previous parameter value\n current_val:current function value\n previous_val: previous function values\n min_:minimum parameter value resulting in function value less than zero\n max_:maximum parameter value resulting in function value greater than zero\n Problem needs to be set up so that greater parameter, greater target\n \"\"\"\n if previous_val is None:\n previous_val = current_val\n if current_val <= 0:\n if previous_val <= 0:\n new = (current + max_) / 2\n if previous_val > 0:\n new = (current + previous) / 2\n if current > min_:\n min_ = current\n if current_val > 0:\n if previous_val > 0:\n new = (current + min_) / 2\n if previous_val <= 0:\n new = (current + previous) / 2\n if current < max_:\n max_ = current\n return new, current, min_, max_\n\n\ndef _delta_search(w, c, positive=False, init=0):\n \"\"\"\n Searches for threshold delta such that the 1-norm of weights w is less than or equal to c\n :param w: weights found by one power method iteration\n :param c: 1-norm threshold\n :return: updated weights\n \"\"\"\n # First normalise the weights unit length\n w = w / np.linalg.norm(w, 2)\n converged = False\n min_ = 0\n max_ = 10\n current = init\n previous = current\n previous_val = None\n i = 0\n while not converged:\n i += 1\n coef = _soft_threshold(w, current, positive=positive)\n if np.linalg.norm(coef) > 0:\n coef /= np.linalg.norm(coef)\n current_val = c - np.linalg.norm(coef, 1)\n current, previous, min_, max_ = _bin_search(\n current, previous, current_val, previous_val, min_, max_\n )\n previous_val = current_val\n if np.abs(current_val) < 1e-5 or np.abs(max_ - min_) < 1e-30 or i == 50:\n converged = True\n return coef\n\n\ndef _soft_threshold(x, threshold, positive=False):\n \"\"\"\n if absolute value of x less than threshold replace with zero\n :param x: input\n :return: x soft-thresholded by threshold\n \"\"\"\n if positive:\n u = np.clip(x, 0, None)\n else:\n u = np.abs(x)\n u = u - threshold\n u[u < 0] = 0\n return u * np.sign(x)\n\n\ndef _support_soft_thresh(x, support, positive=False):\n if x.shape[0] <= support or np.linalg.norm(x) == 0:\n return x\n if positive:\n u = np.clip(x, 0, None)\n else:\n u = np.abs(x)\n idx = np.argpartition(x.ravel(), x.shape[0] - support)\n u[idx[:-support]] = 0\n return u * np.sign(x)\n\n\ndef _cosine_similarity(a, b):\n \"\"\"\n Calculates the cosine similarity between vectors\n :param a: 1d numpy array\n :param b: 1d numpy array\n :return: cosine similarity\n \"\"\"\n # https: // www.statology.org / cosine - similarity - python /\n return a.T @ b / (np.linalg.norm(a) * np.linalg.norm(b))\n" ]
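Editor's note: the score method in the wrapper code above averages all pairwise correlations and subtracts the self-correlations; the standalone sketch below (not part of the archived cca-zoo file) checks that, for two views and one latent dimension, this reduces to the plain correlation between the transformed views.

import numpy as np

# Two noisy copies of the same latent signal stand in for the transformed views;
# shapes mimic (n_samples, latent_dims) with latent_dims = 1.
rng = np.random.default_rng(1)
z = rng.normal(size=200)
x = (z + 0.5 * rng.normal(size=200)).reshape(-1, 1)
y = (z + 0.5 * rng.normal(size=200)).reshape(-1, 1)

r = np.corrcoef(x.T, y.T)[0, 1]        # plain correlation between the two views
pair_corrs = np.array([[1.0, r],       # (k, k) pairwise-correlation matrix,
                       [r, 1.0]])      # diagonal entries are self-correlations
n_views = 2
score = (pair_corrs.sum() - n_views) / (n_views ** 2 - n_views)
print(np.isclose(score, r))            # True: the averaged pairwise correlation equals r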
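Editor's note: the _soft_threshold and _delta_search helpers above implement an l1-budgeted projection of a weight vector, with _bin_search steering the threshold. A minimal self-contained sketch of the same idea, using a plain bisection instead of the archived _bin_search bookkeeping, is given below; the function names here are illustrative and not part of the library.

import numpy as np

def soft_threshold(x, t):
    # shrink magnitudes by t and zero out anything that crosses zero
    u = np.abs(x) - t
    u[u < 0] = 0
    return u * np.sign(x)

def l1_project(w, c, tol=1e-5, max_iter=50):
    # find a threshold t so that the unit-l2-normalised, soft-thresholded vector
    # has l1 norm close to the budget c (feasible for 1 <= c <= sqrt(len(w)))
    w = w / np.linalg.norm(w)
    lo, hi = 0.0, np.max(np.abs(w))
    v = w.copy()
    for _ in range(max_iter):
        t = 0.5 * (lo + hi)
        v = soft_threshold(w, t)
        norm = np.linalg.norm(v)
        if norm == 0.0:          # threshold wiped everything out: back off
            hi = t
            continue
        v = v / norm
        l1 = np.linalg.norm(v, 1)
        if abs(l1 - c) < tol:
            break
        if l1 > c:               # still too dense: raise the threshold
            lo = t
        else:                    # too sparse: lower the threshold
            hi = t
    return v

rng = np.random.default_rng(0)
w = rng.normal(size=20)
v = l1_project(w, c=2.0)
print(np.linalg.norm(v), np.linalg.norm(v, 1))   # approximately 1.0 and 2.0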
[ [ "sklearn.utils.validation.check_is_fitted", "scipy.sparse.issparse", "sklearn.utils.sparsefuncs.mean_variance_axis", "sklearn.utils.validation.check_random_state", "numpy.corrcoef", "numpy.array" ], [ "numpy.diag", "numpy.linalg.svd", "numpy.abs", "numpy.sqrt", "numpy.clip", "sklearn.utils._testing.ignore_warnings", "numpy.linalg.norm", "numpy.ones", "numpy.sign", "sklearn.utils.validation.check_random_state", "sklearn.linear_model.LinearRegression", "numpy.ma.array", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aboestpetersen/icesat2_canopy_heights
[ "556078d72036f18d00e645fb731ecca53320aa58" ]
[ "PhoREAL/source_code/icesatUtils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nScript that contains utliity functions for PhoREAL\n\nCopyright 2019 Applied Research Laboratories, University of Texas at Austin\n\nThis package is free software; the copyright holder gives unlimited\npermission to copy and/or distribute, with or without modification, as\nlong as this notice is preserved.\n\nAuthors:\n Mike Alonzo\n Eric Guenther\n \nDate: September 20, 2019\n\"\"\"\n\n# Filter Runtime warnings\nimport warnings\n\n# Import Python modules\nimport numpy as np\nimport sys\nimport os\nimport pyproj as proj\nfrom scipy import interpolate\nimport pandas as pd\nimport h5py\nimport ctypes\nfrom numpy.ctypeslib import ndpointer \nimport copy\ntry:\n from osgeo import ogr, osr\nexcept ImportError:\n osgeo_func = ['createShapefiles']\n print('warning: module osgeo not found')\n print('affected functions:', osgeo_func)\n\n# Import ICESat-2 modules\nfrom gui_addins import (superFilterFile_windows, superFilterFile_linux)\n\nEPSG_ARCTIC = '3413'\nEPSG_ANTARCTIC = '3976'\nEPSG_ERR = '-1'\nDEG_ARCTIC = 84.0\nDEG_ANTARCTIC = -80.0\n\nEPSG_CEA = '6933' # Lambert Cylindrical Equal Area\nEPSG_LAEA_N = '6931' # Lambert Azimuthal Equal Area\nEPSG_LAEA_S = '6932'\n\nGT_NUMS = ['gt1r', 'gt2r', 'gt3r', 'gt1l', 'gt2l', 'gt3l']\n\n# Object for getNameParts function\nclass fileStruct:\n \n # Define class with designated fields\n def __init__(self, atlVersion, year, month, day, hour, minute, second, \n trackNum, unknown, releaseNum, incrementNum):\n \n self.atlVersion = atlVersion\n self.year = year\n self.month = month\n self.day = day\n self.hour = hour\n self.minute = minute\n self.second = second\n self.trackNum = trackNum\n self.unknown = unknown\n self.releaseNum = releaseNum\n self.incrementNum = incrementNum\n \n\n# Object for gridMetricNew function\nclass GridStruct:\n def __init__(self, x, y, grid, time):\n self.x = x\n self.y = y\n self.grid = grid\n self.t = time\n # endDef\n# endClass\n \n##### Function for reading parts of an .h5 file name\ndef getNameParts(h5FileName):\n \n # Split file name by underscores\n nameSplit = h5FileName.split('_')\n startPos = 0\n error = False\n for i in range(0,len(nameSplit)):\n if 'ATL' in nameSplit[i]:\n startPos = startPos + i\n error = False\n break\n else:\n error = True\n if error == False:\n # Get ATL version\n atlVersion = nameSplit[startPos + 0]\n \n # Get Year, month, day\n year = nameSplit[startPos + 1][0:4]\n month = nameSplit[startPos + 1][4:6]\n day = nameSplit[startPos + 1][6:8]\n \n # Get Hour, minute, second\n hour = nameSplit[startPos + 1][8:10]\n minute = nameSplit[startPos + 1][10:12]\n second = nameSplit[startPos + 1][12:14]\n \n # Get other details\n trackNum = nameSplit[startPos + 2][0:4]\n unknown = nameSplit[startPos + 2][4:9]\n releaseNum = nameSplit[startPos + 3]\n incrementNum = nameSplit[startPos + 4]\n else:\n print(' ATL Filename Not in Standard Format, Unable to record information')\n # Get ATL version\n atlVersion = \"000\"\n \n # Get Year, month, day\n year = \"0000\"\n month = \"00\"\n day = \"00\"\n \n # Get Hour, minute, second\n hour = \"00\"\n minute = \"00\"\n second = \"00\"\n \n # Get other details\n trackNum = \"0000\"\n unknown = \"0000\"\n releaseNum = \"0000\"\n incrementNum = \"0000\"\n \n \n # Get data into class structure\n fileInfo = fileStruct(atlVersion, year, month, day, hour, minute, second, trackNum, unknown, releaseNum, incrementNum)\n \n # Return class\n return fileInfo\n\n# endDef\n\n##### Function to represent 2 numbers as 1 unique number\ndef 
cantorPairing(arrayIn):\n \n # Do Cantor pairing algorithm to express two values as one unique value\n vectorOut = 0.5 * (arrayIn[:,0] + arrayIn[:,1]) * (arrayIn[:,0] + arrayIn[:,1] + 1) + arrayIn[:,1]\n \n return vectorOut\n\n \n##### Function to determine members of one array in another\ndef ismember(a_vec, b_vec, methodType = 'normal'):\n \n \"\"\" MATLAB equivalent ismember function \"\"\"\n \n # Combine multi column arrays into a 1-D array of strings if necessary\n # This will ensure unique rows when using np.isin below\n if(methodType.lower() == 'rows'):\n \n # Turn a_vec into an array of strings\n a_str = a_vec.astype('str')\n b_str = b_vec.astype('str')\n \n # Concatenate each column of strings with commas into a 1-D array of strings\n for i in range(0,np.shape(a_str)[1]):\n a_char = np.char.array(a_str[:,i])\n b_char = np.char.array(b_str[:,i])\n if(i==0):\n a_vec = a_char\n b_vec = b_char\n else:\n a_vec = a_vec + ',' + a_char\n b_vec = b_vec + ',' + b_char\n # endIf\n # endFor\n # endIf\n \n # Find which values in a_vec are present in b_vec\n matchingTF = np.isin(a_vec,b_vec)\n common = a_vec[matchingTF]\n common_unique, common_inv = np.unique(common, return_inverse=True) # common = common_unique[common_inv]\n b_unique, b_ind = np.unique(b_vec, return_index=True) # b_unique = b_vec[b_ind]\n common_ind = b_ind[np.isin(b_unique, common_unique, assume_unique=True)]\n matchingInds = common_ind[common_inv]\n \n return matchingTF, matchingInds\n\n\n##### Function to determine intersection of two arrays\ndef getIntersection2d(a_vec, b_vec, assume_unique=False):\n \n # Get max total number of rows in a_vec and b_vec\n a_vec_maxRows = np.max(a_vec[:,1]) - np.min(a_vec[:,1]) + 1\n b_vec_maxRows = np.max(b_vec[:,1]) - np.min(b_vec[:,1]) + 1\n maxRows = np.max([a_vec_maxRows, b_vec_maxRows])\n \n # Convert x,y locations to index values\n a_vec_IDs = a_vec[:,0]*maxRows + a_vec[:,1]\n b_vec_IDs = b_vec[:,0]*maxRows + b_vec[:,1]\n \n # Get common index values\n commonIDs, a_inds, b_inds = np.intersect1d(a_vec_IDs, b_vec_IDs, assume_unique, return_indices = True)\n \n # Get common values\n commonVals = a_vec[a_inds]\n \n # Return output\n return commonVals, a_inds, b_inds\n\n\n##### Function to determine intersection of two arrays\ndef getIntersection(a_vec, b_vec):\n \n # Get set intersection (common values) of two arrays\n res_set = set(map(tuple, a_vec)) & set(map(tuple, b_vec)) \n commonVals = np.array(list(map(list, res_set))) \n \n if(commonVals.any()):\n \n # Get indices of common values for each array\n _, a_inds = ismember(commonVals, a_vec, 'rows')\n _, b_inds = ismember(commonVals, b_vec, 'rows')\n \n else:\n \n a_inds = []\n b_inds = []\n \n # EndIf\n \n return commonVals, a_inds, b_inds\n\n\n##### Function to map ATL08 to ATL03 class photons\ndef getAtl08Mapping(atl03_ph_index_beg, atl03_segment_id, atl08_classed_pc_indx, atl08_classed_pc_flag, atl08_segment_id):\n \n # Get ATL03 data\n indsNotZero = atl03_ph_index_beg != 0\n atl03_ph_index_beg = atl03_ph_index_beg[indsNotZero];\n atl03_segment_id = atl03_segment_id[indsNotZero];\n \n # Find ATL08 segments that have ATL03 segments\n atl03SegsIn08TF, atl03SegsIn08Inds = ismember(atl08_segment_id,atl03_segment_id)\n \n # Get ATL08 classed indices and values\n atl08classed_inds = atl08_classed_pc_indx[atl03SegsIn08TF]\n atl08classed_vals = atl08_classed_pc_flag[atl03SegsIn08TF]\n\n # Determine new mapping into ATL03 data\n atl03_ph_beg_inds = atl03SegsIn08Inds;\n atl03_ph_beg_val = atl03_ph_index_beg[atl03_ph_beg_inds];\n newMapping = 
atl08classed_inds + atl03_ph_beg_val - 2;\n \n # Get max size of output array\n sizeOutput = newMapping[-1]\n \n # Pre-populate all photon classed array with zeroes\n allph_classed = (np.zeros(sizeOutput + 1).astype(int)) - 1\n \n # Populate all photon classed array from ATL08 classifications\n allph_classed[newMapping] = atl08classed_vals;\n \n # Return all photon classed array\n return allph_classed\n\n\n\n##### Functions to convert Lat/Lon to UTM and vice versa\n'''ATL Geographic Coordinate System Converter\n\nATL GCS Converter will translate an ATL groundtrack to different coordinate systems.\nMainly this is to convert Lonitute and latitude in WGS84 coordinates, which are\nradial, to cartesian coordinates (e.g., UTM) for easy processing, and if required,\nback to WGS84. Primary package required is pyproj.\n\nEach UTM grid is standardized across the globe but there a few exceptions. Zones\n 31V, 32V, 31X, 33X, 35X, and 37X are irregularly sized. Current code does \n not make these exceptions. This should not make too big of a difference for\n now but will become an issue if code needs to work with other GIS.\n For more information:\n https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system\n https://upload.wikimedia.org/wikipedia/commons/e/ed/Utm-zones.jpg\n https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system#/ media/File:Modified_UTM_Zones.png\n\nNotes:\n \nUTM Zone basics:\n https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system\n https://www.e-education.psu.edu/natureofgeoinfo/c2_p23.html\n\nAntartic coordinate systems:\n https://epsg.io/3412 (Datum not specified)\n https://epsg.io/3976 (Official NSIDC coordinate system)\n https://epsg.io/3031 (Recommended by Mike)\n \nArtic coordinate systems:\n https://epsg.io/3995\n https://epsg.io/3413 (Official NSIDC coordinate system: \n https://nsidc.org/data/oib/epsg_3413.html)\n \n\n '''\n\n# Find UTM zone for individual lon/lat.\ndef find_utm_zone_point(lon, lat, module=None, m=None):\n\n \"\"\"\n Input:\n longitude, latitude, {opt} module, {opt} m\n\n\n Output:\n epsg_code string\n\n\n Desc:\n longitude, latitude - in degrees\n No checks on lon/lat bounds\n\n module - options: [None, 'mgrs', 'pygeodesy']\n optional\n If nothing is given, it defaults to None\n If a wrong module is given, it defaults to None condition\n If mgrs or pygeodesy are given, it uses the \n respective module\n\n m - mgrs object, i.e. m = mgrs.MGRS()\n optional input to allow user to predefine\n MGRS object since it doesn't have to be\n defined for every loop\n\n If it defined as anything other than\n mgrs.MGRS(), code will throw an error\n\n\n Latitude Bounds\n UTM exists between -80 and 84 deg latitude. UPS or\n NSIDC Sea Ice Polar Stereographic exists either\n south of -80 deg or north of 84 deg. 
The exact\n latitude bounds differ by method, as shown below\n in interval set notation:\n\n if module == None:\n antarctic: [-90, -80]\n UTM: (-80, 84)\n arctic: [84,90]\n Does not take Norway/Svalbard UTM zone changes\n into account\n\n if module == 'mgrs' or module == 'pygeodesy':\n antarctic: [-90, -80)\n UTM: [-80, 84)\n arctic: [84,90]\n Includes all one-offs in UTM zones\n \n\n Precision\n None has a precision of round-off\n mgrs has a precision of 1m\n pygeodesy has a precision of round-off\n\n \"\"\"\n\n\n if not (module == 'mgrs' or module == 'pygeodesy'): # None condition\n\n utm_band = str(int((np.floor((lon + 180) / 6 ) % 60) + 1))\n if len(utm_band) == 1:\n utm_band = '0'+utm_band\n # print(utm_band)\n if lat >= 0 and lat < 84:\n epsg_code = '326' + utm_band\n elif lat >= 84:\n # epsg_code = '3411'\n epsg_code = EPSG_ARCTIC\n elif lat <= -80:\n # epsg_code = '3412'\n epsg_code = EPSG_ANTARCTIC\n else:\n epsg_code = '327' + utm_band\n return epsg_code\n\n\n elif module == 'mgrs':\n import mgrs\n if m == None:\n m = mgrs.MGRS()\n\n # UTM defined in [-80, 84)\n if DEG_ANTARCTIC <= lat < DEG_ARCTIC:\n # [-80,84)\n mgrs_code = m.toMGRS(lat, lon, MGRSPrecision=5) # 1m precision\n UTM_code = m.MGRSToUTM(mgrs_code)\n utm_band = str(UTM_code[0]).zfill(2)\n hemi = UTM_code[1].decode() # 'N' or 'S'\n\n if not (hemi == 'N' or hemi == 'S'):\n # debug check since code is new\n print('warning ({}): hemi={} is something other than N or S'.format(module, hemi))\n\n epsg_code = '326' + utm_band # N\n if hemi == 'S':\n epsg_code = '327' + utm_band\n\n elif lat < DEG_ANTARCTIC:\n # [-90, -80), polar antarctic\n epsg_code = EPSG_ANTARCTIC\n\n elif lat >= DEG_ARCTIC:\n # [84, 90], polar arctic\n epsg_code = EPSG_ARCTIC\n\n return epsg_code\n\n\n elif module == 'pygeodesy':\n import pygeodesy as pg\n\n if DEG_ANTARCTIC <= lat < DEG_ARCTIC:\n # [-80,84)\n z = pg.utm.utmZoneBand5(lat, lon)\n utm_band = str(z.zone).zfill(2)\n hemi = z.hemipole # 'N' or 'S'\n\n if not (hemi == 'N' or hemi == 'S'):\n # debug check since code is new\n print('warning ({}): hemi={} is something other than N or S'.format(module, hemi))\n\n epsg_code = '326' + utm_band # N\n if hemi == 'S':\n epsg_code = '327' + utm_band\n\n\n elif lat < DEG_ANTARCTIC:\n # [-90, -80), polar antarctic\n epsg_code = EPSG_ANTARCTIC\n\n elif lat >= DEG_ARCTIC:\n # [84, 90], polar arctic\n epsg_code = EPSG_ARCTIC\n\n return epsg_code\n\n\n\n\n# Find UTM zone for numpy array of lon/lat.\ndef find_utm_zone_arr(lon, lat, mode=True, module=None, m=None):\n\n \"\"\"\n Input:\n longitude, latitude, {opt} mode, {opt} module, {opt} m\n \n\n Output:\n if mode:\n return a single epsg_code\n else:\n return a list of epsg_codes relative to lon/lat\n\n\n Desc:\n longitude, latitude - in degrees\n No checks on lon/lat bounds\n These must be the same length\n\n mode - whether to return the mode of zones or not\n default is True\n\n module - options: [None, 'mgrs', 'pygeodesy']\n optional\n If nothing is given, it defaults to None\n If a wrong module is given, it defaults to None condition\n If mgrs or pygeodesy are given, it uses the \n respective module\n\n m - mgrs object, i.e. 
m = mgrs.MGRS()\n optional input to allow user to predefine\n MGRS object since it doesn't have to be\n defined for every loop\n\n If it defined as anything other than\n mgrs.MGRS(), code will throw an error\n\n See find_utm_zone_point() for additional details.\n\n \"\"\"\n default_mod_bool = not (module == 'mgrs' or module == 'pygeodesy')\n\n if type(lon) != np.ndarray:\n lon = np.array(lon)\n if type(lat) != np.ndarray:\n lat = np.array(lat)\n\n if lon.size == 1:\n lon = np.array([lon])\n if lat.size == 1:\n lat = np.array([lat])\n\n if len(lon) != len(lat):\n print('error: len(lon) != len(lat)')\n return EPSG_ERR\n \n if default_mod_bool and mode:\n # OG way to find zone via the mode of all points\n\n arctic = len(lat[lat >= 84.0])\n nhem = len(lat[(lat < 84.0) & (lat >= 0)])\n shem = len(lat[(lat > -80.0) & (lat < 0)])\n antarctic = len(lat[lat < -80.0])\n if arctic > nhem: #Arctic case\n epsg_code = EPSG_ARCTIC # '3413'\n elif antarctic > shem: #Antarctic case\n epsg_code = EPSG_ANTARCTIC #'3976'\n else: #If not Arctic or Antarctic it is within UTM Zone\n tz = np.floor((((lon + 180) / 6 ) % 60) + 1).astype(int)\n zone = np.unique(tz)\n if len(zone) == 1:\n zone = zone[0]\n elif len(zone) == 2:\n z1 = len(tz[tz == zone[0]])\n z2 = len(tz[tz == zone[1]])\n if z1 > z2:\n zone = zone[0]\n else:\n zone = zone[1]\n elif len(zone) == 3:\n zone = zone[1]\n elif len(zone) > 3:\n from scipy.stats import mode as mode_func\n zone = mode_func(zone)[0][0]\n print(\"Warning: Input ground track present in more than 3 UTM zones. \\\n \\nRecommend manually selecting GCS.\")\n else:\n # len(zone) == 0\n print(\"Warning: zone == [], lon/lat may not have values\")\n sys.exit()\n\n if nhem >= shem:\n if len(str(zone)) == 1:\n zone = \"0\" + str(zone)\n epsg_code = '326' + str(zone)\n else:\n if len(str(zone)) == 1:\n zone = \"0\" + str(zone)\n epsg_code = '327' + str(zone)\n return epsg_code\n\n\n\n else:\n # OG method and mode or\n # OG method and not mode or\n # different method and mode or\n # different method and not mode\n\n epsg_code_arr = []\n for i in range(len(lon)):\n out = find_utm_zone_point(lon[i], lat[i], module=module, m=m)\n epsg_code_arr.append(out)\n\n if mode:\n from scipy.stats import mode as mode_func\n epsg_code = mode_func(epsg_code_arr)[0][0]\n return epsg_code\n\n else:\n return epsg_code_arr\n\ndef transform_single_point(x, y, coordTransform):\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x, y)\n point.Transform(coordTransform)\n x = point.GetX()\n y = point.GetY()\n return x, y\n\n\n# Transform GCS/PCS based on EPSG and x/y. 
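\n# Editor's note (illustrative, not in the archived source): with this file's own helpers,\n# find_utm_zone_point(-97.75, 30.27) computes floor((-97.75 + 180) / 6) % 60 + 1 = zone 14 and,\n# since 0 <= lat < 84, returns EPSG '32614'; a matching call is then\n# xx, yy = transform('epsg:4326', 'epsg:32614', lon, lat),\n# where pyproj's Transformer.from_crs path is tried first and the osgeo/osr fallback below runs only if pyproj raises.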
\ndef transform(epsg_in, epsg_out, x, y, use_old_version=False):\n try:\n # Using pyproj version 2 and above\n # https://pyproj4.github.io/pyproj/stable/gotchas.html#upgrading-to-pyproj-2-from-pyproj-1\n transformer = proj.Transformer.from_crs(epsg_in, epsg_out, always_xy=True)\n xx, yy = transformer.transform(x, y)\n except:\n print('PYPROJ failed, attemping with GDAL...')\n if isinstance(epsg_in, str):\n epsg_in = int(epsg_in.strip('epsg:'))\n else:\n epsg_in = int(epsg_in)\n if isinstance(epsg_out, str):\n epsg_out = int(epsg_out.strip('epsg:'))\n else:\n epsg_out = int(epsg_out)\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(epsg_in)\n \n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(epsg_out)\n\n x = x.astype('float')\n y = y.astype('float')\n \n coordTransform = osr.CoordinateTransformation(inSpatialRef, \n outSpatialRef)\n pts = np.asarray([transform_single_point(x[i], y[i], coordTransform) \\\n for i in range(0,len(x))])\n xx = pts[:,0]\n yy = pts[:,1]\n print('Projection with GDAL successful!')\n \n \n return xx,yy\n # endIf\n\n# Transform from lon/lat to given EPSG. \ndef wgs84_to_epsg_transform(epsg_out, lon, lat):\n epsg_in = 'epsg:4326'\n epsg_out = ('epsg:{0}'.format(str(epsg_out)))\n xx, yy = transform(epsg_in, epsg_out, lon, lat)\n return xx,yy\n\ndef identifyEPSG(hemi,zone):\n if hemi == 'N':\n outstring = 'epsg:326'\n elif hemi == \"S\":\n outstring = 'epsg:327'\n else:\n outstring = ''\n print('error')\n \n outstring = outstring + (str(zone).zfill(2))\n \n return outstring\n\ndef identify_hemi_zone(epsg):\n epsg = str(epsg)\n epsg = epsg.split(':')[-1]\n \n if (epsg[0:3] == '326'):\n hemi = 'N'\n zone = epsg[3:5]\n elif (epsg[0:3] == '327'):\n hemi = 'N'\n zone = epsg[3:5]\n elif(epsg =='3413'):\n zone = epsg \n hemi = 'arctic'\n elif(epsg =='3976'):\n zone = epsg \n hemi = 'arctic'\n else:\n print('Could not read EPSG for hemi/zone')\n zone = epsg \n hemi = ''\n return hemi, zone\n \n\n# Calls functions to find EPSG code and perform GCS transform automatically.\ndef wgs84_to_utm_find_and_transform(lon, lat):\n epsg_out = find_utm_zone_arr(lon, lat)\n xx, yy = wgs84_to_epsg_transform(epsg_out, lon, lat)\n return xx,yy,epsg_out\n\n\n# Inputs: lon, lat, (UTM zone), (UTM hemisphere)\ndef getLatLon2UTM(*args):\n\n # Set EPSG code for lat/lon coords\n epsg_in = 'epsg:4326'\n \n # Get lats/lons\n lon = args[0]\n lat = args[1]\n \n # Call function based on number of input args\n if(len(args) > 2):\n \n # Get zone/hemi\n zone = args[2]\n hemi = args[3]\n \n # Get EPSG out code for UTM coords\n if(hemi=='N'):\n if len(zone) == 1:\n zone = '0' + zone\n epsg_out = 'epsg:326' + zone\n else:\n if len(zone) == 1:\n zone = '0' + zone \n epsg_out = 'epsg:327' + zone\n # endif\n \n # Call transform function\n if len(lon) > 0 and len(lat) > 0:\n print(epsg_in)\n print(epsg_out)\n xx, yy = transform(epsg_in, epsg_out, lon, lat)\n else:\n xx, yy = np.array([]), np.array([])\n\n else:\n \n # Get UTM coords\n xx, yy, epsg_out = wgs84_to_utm_find_and_transform(lon, lat)\n \n if(epsg_out=='3413'):\n \n zone = epsg_out \n hemi = 'arctic'\n \n elif(epsg_out=='3976'):\n \n zone = epsg_out \n hemi = 'antarctic'\n \n else:\n \n # Store zone\n zone = epsg_out[3:]\n \n # Store hemisphere\n if(epsg_out[0:3]=='326'): \n hemi = 'N' \n else:\n hemi = 'S'\n # endIf\n \n # endIf\n \n # endIf\n \n # Return output\n return xx, yy, zone, hemi\n\n\n# Function to convert UTM to lat/lon\ndef getUTM2LatLon(x,y,zone,hemi):\n \n # Set EPSG code for lat/lon coords\n 
epsg_out = 'epsg:4326'\n \n # Get EPSG code for UTM coords\n if(hemi=='N'):\n if len(zone) == 1:\n zone = \"0\" + zone\n epsg_in = 'epsg:326' + zone\n else:\n if len(zone) == 1:\n zone = \"0\" + zone\n epsg_in = 'epsg:327' + zone\n # endif\n \n # Call transform function\n lon, lat = transform(epsg_in, epsg_out, x, y)\n \n return lat, lon\n\n\n# Identifies midpoint for a given array.\ndef getMidpoint(arr):\n n = arr.shape[0] / 2.0\n n_int = int(n)\n if n % 2 == 0:\n return (arr[n_int] + arr[n_int - 1]) / 2\n else:\n return arr[n_int]\n \n \n##### Functions to convert from Easting/Northing frame to Cross-Track/Along-Track frame and vice versa\ndef getCoordRotFwd(xIn,yIn,R_mat,xRotPt,yRotPt,desiredAngle):\n \n # Get shape of input X,Y data\n xInShape = np.shape(xIn)\n yInShape = np.shape(yIn)\n \n # If shape of arrays are (N,1), then make them (N,)\n xIn = xIn.ravel()\n yIn = yIn.ravel()\n \n # Suppress warnings that may come from np.polyfit\n if not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n # endif\n \n # If Rmatrix, xRotPt, and yRotPt are empty, then compute them\n if(len(R_mat)==0 and len(xRotPt)==0 and len(yRotPt)==0):\n \n # Get current angle of linear fit data\n x1 = xIn[0]\n x2 = xIn[-1]\n y1 = yIn[0]\n y2 = yIn[-1]\n # endif\n deltaX = x2 - x1\n deltaY = y2 - y1\n theta = np.arctan2(deltaY,deltaX)\n \n # Get angle to rotate through\n phi = np.radians(desiredAngle) - theta\n \n # Get rotation matrix\n R_mat = np.matrix(np.array([[np.cos(phi), -np.sin(phi)],[np.sin(phi), np.cos(phi)]]))\n \n # Get X,Y rotation points\n xRotPt = x1\n yRotPt = y1\n \n else:\n \n # Get angle to rotate through\n phi = np.arccos(R_mat[0,0])\n \n # endif\n \n # Translate data to X,Y rotation point\n xTranslated = xIn - xRotPt\n yTranslated = yIn - yRotPt\n \n # Convert np array to np matrix\n xTranslated_mat = np.matrix(xTranslated)\n yTranslated_mat = np.matrix(yTranslated)\n \n # Get shape of np X,Y matrices\n (xTranslated_matRows,xTranslated_matCols) = xTranslated_mat.shape\n (yTranslated_matRows,yTranslated_matCols) = yTranslated_mat.shape\n \n # Make X input a row vector\n if(xTranslated_matRows > 1):\n xTranslated_mat = np.transpose(xTranslated_mat)\n #endif\n \n # Make Y input a row vector\n if(yTranslated_matRows > 1):\n yTranslated_mat = np.transpose(yTranslated_mat)\n #endif\n \n # Put X,Y data into separate rows of matrix\n xyTranslated_mat = np.concatenate((xTranslated_mat,yTranslated_mat))\n \n # Compute matrix multiplication to get rotated frame\n measRot_mat = np.matmul(R_mat,xyTranslated_mat)\n \n # Pull out X,Y rotated data\n xRot_mat = measRot_mat[0,:]\n yRot_mat = measRot_mat[1,:]\n \n # Convert X,Y matrices back to np arrays for output\n xRot = np.array(xRot_mat)\n yRot = np.array(yRot_mat)\n \n # Make X,Y rotated output the same shape as X,Y input\n xRot = np.reshape(xRot,xInShape)\n yRot = np.reshape(yRot,yInShape)\n \n # Reset warnings \n warnings.resetwarnings()\n \n # Return outputs\n return xRot, yRot, R_mat, xRotPt, yRotPt, phi\n\n\ndef getCoordRotRev(xRot,yRot,R_mat,xRotPt,yRotPt):\n \n # Get shape of input X,Y data\n xRotShape = np.shape(xRot)\n yRotShape = np.shape(yRot)\n \n # Convert data to columns\n xRot_mat = np.c_[xRot]\n yRot_mat = np.c_[yRot]\n \n # Get shape of matrices\n (xRot_matRows,xRot_matCols) = xRot_mat.shape\n (yRot_matRows,yRot_matCols) = yRot_mat.shape\n \n # Make X input a row vector\n if(xRot_matRows > 1):\n xRot_mat = np.transpose(xRot_mat)\n #endif\n \n # Make Y input a row vector\n if(yRot_matRows > 1):\n yRot_mat = np.transpose(yRot_mat)\n 
#endif\n \n # Put X,Y data into 2 x N matrix\n xyRot_mat = np.concatenate((xRot_mat,yRot_mat))\n\n # Rotate data back to original frame\n measUnrot_mat = np.matmul(np.linalg.inv(R_mat),xyRot_mat)\n \n # Pull out X,Y unrotated data\n xUnrot_mat = measUnrot_mat[0,:]\n yUnrot_mat = measUnrot_mat[1,:]\n \n # Translate data back to original point\n xOut_mat = xUnrot_mat + xRotPt\n yOut_mat = yUnrot_mat + yRotPt\n \n # Convert matrices to numpy arrays for output\n xOut = np.squeeze(np.asarray(xOut_mat))\n yOut = np.squeeze(np.asarray(yOut_mat))\n \n # Make X,Y output the same shape as X,Y input\n xOut = np.reshape(xOut,xRotShape)\n yOut = np.reshape(yOut,yRotShape)\n \n # Return output variables\n return xOut, yOut, R_mat, xRotPt, yRotPt\n\n\n##### Function to add in geoid model\ndef getGeoidHeight(geoidData,atlTruthData):\n \n # Convert truth data from UTM to Lat/Lon\n x = atlTruthData.easting\n y = atlTruthData.northing\n zone = atlTruthData.zone\n hemi = atlTruthData.hemi\n latsIn, lonsIn = getUTM2LatLon(x,y,zone,hemi)\n \n # Interpolate to find geoidal heights\n lons = geoidData.lons[0]\n # lats = geoidData.lats[0]\n # geoidalHeights = geoidData.geoidalHeights[0]\n lons[lons > 180.] = lons[lons > 180] - 360\n geoidData.lons[0] = lons\n f = interpolate.interp2d(geoidData.lons, geoidData.lats, geoidData.geoidalHeights, kind='linear')\n geoidalHeights = interpolate.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], lonsIn, latsIn)[0]\n geoidalHeights = np.c_[geoidalHeights]\n \n # Add geoidal heights to find new ellipsoidal heights (HAE)\n atlTruthData.z = atlTruthData.z + geoidalHeights\n \n return atlTruthData\n\n\n##### Function to add in geoid model\ndef getGeoidHeight2(geoidData,x,y,z_msl,zone,hemi):\n \n # Convert truth data from UTM to Lat/Lon\n latsIn, lonsIn = getUTM2LatLon(x,y,zone,hemi)\n \n # Interpolate to find geoidal heights\n lons = geoidData.lons[0]\n # lats = geoidData.lats[0]\n # geoidalHeights = geoidData.geoidalHeights[0]\n lons[lons > 180.] 
= lons[lons > 180] - 360\n geoidData.lons[0] = lons\n f = interpolate.interp2d(geoidData.lons, geoidData.lats, geoidData.geoidalHeights, kind='linear')\n geoidalHeights = interpolate.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], lonsIn, latsIn)[0]\n # geoidalHeights = np.c_[geoidalHeights]\n \n # Add geoidal heights to find new ellipsoidal heights (HAE)\n z_hae = z_msl + geoidalHeights\n \n return z_hae\n\n##### Function to grid point cloud data\ndef getRaster_legacy(x, y, z, resolution, method, fillValue = -999, time = [], xAllArray = [], yAllArray = []):\n \n # USER INPUTS\n # ---------------------------\n # x = input x array of values\n # y = input y array of values\n # z = input z array of values\n # resolution = resolution of grid cells (N = N x N, [M,N] = M x N)\n # method = operation to perform in each grid cell\n # - min\n # - max\n # - mean (default)\n # - median\n # - range\n # - std (standard deviation)\n # - numel (number of elements)\n # fillValue = value to fill in empty grid cells\n # time = secondary array (like z) to perform operation on in each grid cell\n # xAllArray = force output X grid cells (use np.arange(start, stop + 1, step))\n # yAllArray = force output Y grid cells (use np.arange(start, stop + 1, step))\n \n # Get X,Y resolution\n if isinstance(resolution,np.integer):\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif isinstance(resolution,int):\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif isinstance(resolution,float):\n xResolution = resolution\n yResolution = resolution\n elif isinstance(resolution,np.ndarray):\n if len(resolution) == 1:\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif len(resolution) == 2:\n xResolution = float(resolution[0])\n yResolution = float(resolution[1])\n else:\n print(\"Incorrect resolution input\")\n elif isinstance(resolution,list):\n xResolution = float(resolution[0])\n yResolution = float(resolution[1])\n elif isinstance(resolution,str):\n strList = resolution.split(\",\")\n if len(strList) == 1:\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif len(strList) == 2:\n xResolution = float(strList[0])\n yResolution = float(strList[1])\n else:\n print(\"Incorrect resolution input\")\n else:\n print(\"Incorrect resolution input\")\n\n # Get grid method\n if(method.lower() == 'min'):\n npOperation = np.nanmin\n elif(method.lower() == 'max'):\n npOperation = np.nanmax\n elif(method.lower() == 'mean'):\n npOperation = np.nanmean\n elif(method.lower() == 'median'):\n npOperation = np.nanmedian\n elif(method.lower() == 'range'):\n npOperation = np.range\n elif(method.lower() == 'std'):\n npOperation = np.nanstd\n elif(method.lower() == 'numel'):\n npOperation = np.size\n else:\n npOperation = np.mean\n # EndIf\n \n # Round all incoming X,Y data\n xRnd = (np.round(x/xResolution)*xResolution).astype(int)\n yRnd = (np.round(y/yResolution)*yResolution).astype(int)\n \n # Get output X,Y grid cells\n if(any(xAllArray) and any(yAllArray)):\n \n xAll = xAllArray\n yAll = yAllArray\n \n else:\n \n # Get min,max of rounded X,Y data\n xRndMin = xRnd.min()\n xRndMax = xRnd.max()\n yRndMin = yRnd.min()\n yRndMax = yRnd.max()\n \n # Get all possible grid combinations\n xAll = np.arange(xRndMin, xRndMax + xResolution, xResolution)\n yAll = np.arange(yRndMax, yRndMin - yResolution, -yResolution)\n \n # endIf\n \n # Get X,Y array of all pts\n xAllArray, yAllArray = np.meshgrid(xAll,yAll)\n xyAll = 
np.column_stack((xAllArray.flatten(), yAllArray.flatten()))\n \n # Populate X,Y raster data\n numRows = len(yAll);\n numCols = len(xAll);\n rasterDataX = xAllArray;\n rasterDataY = yAllArray;\n \n # Get unique incoming X,Y data\n uniqueCombos, uniqueGroups = np.unique((xRnd,yRnd), return_inverse = True, axis = 1)\n uniqueCombos = np.transpose(uniqueCombos)\n \n uniqueCombos = uniqueCombos.astype('int')\n xyAll = xyAll.astype('int')\n \n # Find index locations of unique incoming X,Y data in X,Y array of all pts\n _, indsToFill = ismember(uniqueCombos,xyAll,'rows')\n indsToFill = np.c_[indsToFill]\n \n # Grid Z data, populate into raster array, and reshape \n df = pd.DataFrame(np.column_stack([z, uniqueGroups]), columns=['z', 'unique_groups'])\n zGroups = df.groupby('unique_groups')\n zOut = zGroups.aggregate(npOperation)\n zOut = np.array(zOut)\n zSplit = np.c_[zOut[:,0]]\n zRaster = fillValue*np.ones((numRows*numCols,1))\n zRaster[indsToFill,0] = zSplit\n rasterDataZ = np.reshape(zRaster,(numRows,numCols))\n \n # Grid 'time' array if necessary\n if(any(time)):\n \n df = pd.DataFrame(np.column_stack([time, uniqueGroups]), columns=['time', 'unique_groups'])\n tGroups = df.groupby('unique_groups')\n tOut = tGroups.aggregate(npOperation)\n tOut = np.array(tOut)\n tSplit = np.c_[tOut[:,0]] \n tRaster = fillValue*np.ones((numRows*numCols,1))\n tRaster[indsToFill,0] = tSplit\n rasterDataT = np.reshape(tRaster,(numRows,numCols))\n \n else:\n \n rasterDataT = []\n \n # EndIf\n\n # Return output\n return GridStruct(rasterDataX, rasterDataY, rasterDataZ, rasterDataT)\n\n\n##### Function to grid point cloud data\ndef getRaster(x, y, z, resolution, method, fillValue = -999, time = [], xAllArray = [], yAllArray = [],\n origin=None):\n \n # USER INPUTS\n # ---------------------------\n # x = input x array of values\n # y = input y array of values\n # z = input z array of values\n # resolution = resolution of grid cells (N = N x N, [M,N] = M x N)\n # method = operation to perform in each grid cell\n # - min\n # - max\n # - mean (default)\n # - median\n # - mode\n # - range\n # - std (standard deviation)\n # - numel (number of elements)\n # fillValue = value to fill in empty grid cells\n # time = secondary array (like z) to perform operation on in each grid cell\n # xAllArray = force output X grid cells (use np.arange(start, stop + 1, step))\n # yAllArray = force output Y grid cells (use np.arange(start, stop + 1, step))\n #\n # OUTPUTS\n # -------------\n # output.x = x (2D raster)\n # output.y = y (2D raster)\n # output.grid = grid (2D raster)\n # output.t = time (2D raster)\n \n \n # Get X,Y resolution\n if isinstance(resolution,np.integer):\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif isinstance(resolution,int):\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif isinstance(resolution,float):\n xResolution = resolution\n yResolution = resolution\n elif isinstance(resolution,np.ndarray):\n if len(resolution) == 1:\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif len(resolution) == 2:\n xResolution = float(resolution[0])\n yResolution = float(resolution[1])\n else:\n print(\"Incorrect resolution input\")\n elif isinstance(resolution,list):\n xResolution = float(resolution[0])\n yResolution = float(resolution[1])\n elif isinstance(resolution,str):\n strList = resolution.split(\",\")\n if len(strList) == 1:\n xResolution = float(resolution)\n yResolution = float(resolution)\n elif len(strList) == 2:\n xResolution = 
float(strList[0])\n yResolution = float(strList[1])\n else:\n print(\"Incorrect resolution input\")\n else:\n print(\"Incorrect resolution input\")\n\n # Get grid method\n if(method.lower() == 'min'):\n npOperation = np.nanmin\n elif(method.lower() == 'max'):\n npOperation = np.nanmax\n elif(method.lower() == 'mean'):\n npOperation = np.nanmean\n elif(method.lower() == 'median'):\n npOperation = np.nanmedian\n elif(method.lower() == 'mode'):\n npOperation = mode\n elif(method.lower() == 'range'):\n npOperation = np.range\n elif(method.lower() == 'std'):\n npOperation = np.nanstd\n elif(method.lower() == 'numel'):\n npOperation = np.size\n else:\n npOperation = np.mean\n # EndIf\n \n # Round all incoming X,Y data\n xRnd = (np.round(x/xResolution)*xResolution)\n yRnd = (np.round(y/yResolution)*yResolution)\n \n # Get output X,Y grid cells\n if(any(xAllArray) and any(yAllArray)):\n \n xAll = xAllArray\n yAll = np.flipud(yAllArray)\n \n else:\n # Get min,max of rounded X,Y data\n xRndMin = xRnd.min()\n xRndMax = xRnd.max()\n yRndMin = yRnd.min()\n yRndMax = yRnd.max()\n \n # Get all possible grid combinations\n xAll = np.arange(xRndMin, xRndMax + xResolution, xResolution)\n yAll = np.arange(yRndMax, yRndMin - yResolution, -yResolution)\n\n # endIf\n \n # Get X,Y array of all pts\n xAllArray, yAllArray = np.meshgrid(xAll,yAll)\n \n # Get raster X, Y, Z space\n rasterDataX = xAllArray.astype('float')\n rasterDataY = yAllArray.astype('float')\n rasterDataZ = fillValue*np.ones((np.shape(rasterDataX))).astype('float')\n \n # Make sure input data is of type float\n if(xRnd.dtype!='float64'):\n xRnd = xRnd.astype('float')\n # endIf\n if(yRnd.dtype!='float64'):\n yRnd = yRnd.astype('float')\n # endIf\n if(z.dtype!='float64'):\n z = z.astype('float')\n # endIf\n \n # Get x-rastered, y-rastered, and z data into array\n data = np.column_stack([xRnd, yRnd, z])\n \n # Put array into Pandas dataframe\n df = pd.DataFrame(data, columns=['xRnd', 'yRnd', 'z'])\n \n # Do groupby.agg to get get rastered operation for group\n groupedData = df.groupby(['xRnd', 'yRnd']).agg({'z': [npOperation]})\n groupedData.columns = ['z_agg']\n groupedData = groupedData.reset_index()\n zValsNew = np.array(groupedData['z_agg'])\n \n # Determine new row, column indices to place rastered Z data into\n df_xRnd_min = np.min(groupedData['xRnd'])\n df_yRnd_min = np.min(groupedData['yRnd'])\n colIndsNew = ((np.array(groupedData['xRnd']) - df_xRnd_min)/xResolution).astype(int)\n rowIndsNew = ((np.array(groupedData['yRnd']) - df_yRnd_min)/yResolution).astype(int)\n\n # Populate rastered Z data into array\n rasterDataZ[rowIndsNew, colIndsNew] = zValsNew\n rasterDataZ = np.flipud(rasterDataZ)\n \n # Grid 'time' array if necessary\n if(any(time)):\n \n # Get x-rastered, y-rastered, and time data into array\n if(time.dtype!='float64'):\n time = time.astype('float')\n # endIf\n dataTime = np.column_stack([xRnd, yRnd, time])\n \n # Put array into Pandas dataframe\n dfTime = pd.DataFrame(dataTime, columns=['xRnd', 'yRnd', 'time'])\n \n # Do groupby.agg to get get rastered operation for group\n groupedDataTime = dfTime.groupby(['xRnd', 'yRnd']).agg({'time': [npOperation]})\n groupedDataTime.columns = ['time_agg']\n groupedDataTime = groupedDataTime.reset_index()\n tValsNew = np.array(groupedDataTime['time_agg'])\n \n # Populate rastered Z data into array\n rasterDataT = fillValue*np.ones((np.shape(rasterDataX)))\n rasterDataT[rowIndsNew, colIndsNew] = tValsNew\n rasterDataT = np.flipud(rasterDataT)\n \n else:\n \n rasterDataT = []\n \n # EndIf\n\n 
# Return output\n return GridStruct(rasterDataX, rasterDataY, rasterDataZ, rasterDataT)\n\n# endDef\n\n\n##### Function to find closest points in an array\ndef getClosest(inputArray, closestPts):\n \n # Initialize outputs\n minInd = np.zeros(np.shape(closestPts), dtype = int)\n minVal = np.zeros(np.shape(closestPts))\n \n # Loop through closest points array and find closest point to input array\n for i in range(0,len(closestPts)):\n closestPt = closestPts[i]\n arrayDif = np.abs(inputArray - closestPt)\n minInd[i] = np.argmin(arrayDif) \n minVal[i] = inputArray[minInd[i]]\n # EndFor\n \n # Return outputs\n return minVal, minInd\n\ndef __appendGlobalList(name):\n if name:\n global_list.append(name)\n\ndef getH5Keys(h5_file,group = None, out_txt = None, verbose = False, matchText = None):\n global global_list\n global_list = []\n try:\n h = h5py.File(h5_file, 'r')\n except:\n print(\"Could not find file or file was not proper H5 file\")\n sys.exit\n if group:\n group = str(group)\n h[group].visit(__appendGlobalList)\n else:\n h.visit(__appendGlobalList)\n if verbose:\n print(*global_list, sep = \"\\n\")\n if out_txt:\n with open(out_txt, 'w', newline = '') as csvFile:\n with open(out_txt, 'w') as f:\n for item in global_list:\n f.write(\"%s\\n\" % item)\n csvFile.close\n if matchText:\n global_list = [s for s in global_list if matchText in s]\n \n return global_list\n\ndef sortAtlMeasured(atlMeasuredData, verbose=False):\n \n if(verbose):\n print(\"Sorting ATL Measured Data...\", end = \" \")\n # endIf\n sort_index = np.argsort(atlMeasuredData.alongTrack, axis = 0)\n atlMeasuredData.alongTrack = atlMeasuredData.alongTrack[sort_index[:,0]]\n atlMeasuredData.crossTrack = atlMeasuredData.crossTrack[sort_index[:,0]]\n atlMeasuredData.lat = atlMeasuredData.lat[sort_index[:,0]]\n atlMeasuredData.lon = atlMeasuredData.lon[sort_index[:,0]]\n atlMeasuredData.northing = atlMeasuredData.northing[sort_index[:,0]]\n atlMeasuredData.easting = atlMeasuredData.easting[sort_index[:,0]]\n atlMeasuredData.z = atlMeasuredData.z[sort_index[:,0]]\n atlMeasuredData.time = atlMeasuredData.time[sort_index[:,0]]\n atlMeasuredData.classification = \\\n atlMeasuredData.classification[sort_index[:,0]]\n atlMeasuredData.signalConf = atlMeasuredData.signalConf[sort_index[:,0]]\n \n if(verbose):\n print(\"Complete\")\n # endIf\n return atlMeasuredData\n \ndef sortAtlTruth(atlTruthData, verbose=False):\n if(verbose):\n print(\"Sorting ATL Truth Data...\", end = \" \") \n # endIf\n sort_index = np.argsort(atlTruthData.alongTrack, axis = 0)\n atlTruthData.alongTrack = atlTruthData.alongTrack[sort_index[:,0]]\n atlTruthData.crossTrack = atlTruthData.crossTrack[sort_index[:,0]]\n atlTruthData.northing = atlTruthData.northing[sort_index[:,0]]\n atlTruthData.easting = atlTruthData.easting[sort_index[:,0]]\n atlTruthData.lat = atlTruthData.lat[sort_index[:,0]]\n atlTruthData.lon = atlTruthData.lon[sort_index[:,0]]\n atlTruthData.z = atlTruthData.z[sort_index[:,0]]\n atlTruthData.intensity = atlTruthData.intensity[sort_index[:,0]]\n atlTruthData.classification = atlTruthData.classification[sort_index[:,0]]\n\n if(verbose):\n print(\"Complete\")\n # endIF\n return atlTruthData\n\ndef indexMatch(measuredArray,truthArray,verbose=False):\n if(verbose):\n print(\"Match corresponding indices...\", end = \" \")\n # endIf\n A = np.array(measuredArray)\n B = np.array(truthArray)\n C = np.empty((len(B)))\n \n \n if os.name == 'nt':\n lib = ctypes.cdll.LoadLibrary(os.path.abspath(superFilterFile_windows))\n else:\n lib = 
ctypes.cdll.LoadLibrary(os.path.abspath(superFilterFile_linux))\n fun = lib.cfun\n fun.restype = None\n fun.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t]\n fun(A, A.size, B, C, B.size)\n if(verbose):\n print(\"Complete\")\n # endIf\n return np.array(C).astype(int)\n\n \ndef superFilter(atlMeasuredData_in, atlTruthData_in, xBuf = 7, classCode = [], verbose=False):\n if(verbose):\n print(\"Applying Superfilter\")\n # endIf\n #Sort Measured\n atlMeasuredData = copy.deepcopy(atlMeasuredData_in) \n atlTruthData = copy.deepcopy(atlTruthData_in) \n\n atlMeasuredData = sortAtlMeasured(atlMeasuredData)\n #Sort Truth\n atlTruthData = sortAtlTruth(atlTruthData)\n \n \n #Find Matching Indexes\n if classCode:\n filter_class = np.isin(atlMeasuredData.classification,classCode)\n \n alongTrack = atlMeasuredData.alongTrack[filter_class]\n crossTrack = atlMeasuredData.crossTrack[filter_class]\n indexMatches = indexMatch(alongTrack,atlTruthData.alongTrack)\n #Generate filter\n indexMatches[indexMatches >= len(crossTrack)] = (len(crossTrack) - 1)\n x_check = crossTrack[indexMatches]\n x_diff = atlTruthData.crossTrack[:,0] - x_check\n filter_data = np.where((x_diff < xBuf) & (x_diff > -xBuf)) \n #Apply filter to truth data\n atlTruthData.alongTrack = atlTruthData.alongTrack[filter_data]\n atlTruthData.crossTrack = atlTruthData.crossTrack[filter_data]\n atlTruthData.northing = atlTruthData.northing[filter_data]\n atlTruthData.easting = atlTruthData.easting[filter_data]\n atlTruthData.lat = atlTruthData.lat[filter_data]\n atlTruthData.lon = atlTruthData.lon[filter_data]\n atlTruthData.z = atlTruthData.z[filter_data]\n atlTruthData.intensity = atlTruthData.intensity[filter_data]\n atlTruthData.classification = atlTruthData.classification[filter_data]\n atlTruthData.time = atlTruthData.time[filter_data]\n atlTruthData.deltaTime = atlTruthData.deltaTime[filter_data]\n else:\n indexMatches = indexMatch(atlMeasuredData.alongTrack, \\\n atlTruthData.alongTrack)\n #Generate filter\n indexMatches[indexMatches >= len(atlMeasuredData.crossTrack)] = \\\n len(atlMeasuredData.crossTrack) - 1\n x_check = atlMeasuredData.crossTrack[indexMatches]\n x_diff = atlTruthData.crossTrack - x_check\n filter_data = np.where((x_diff < xBuf) & (x_diff > -xBuf)) \n #Apply filter to truth data\n atlTruthData.alongTrack = atlTruthData.alongTrack[filter_data]\n atlTruthData.crossTrack = atlTruthData.crossTrack[filter_data]\n atlTruthData.northing = atlTruthData.northing[filter_data]\n atlTruthData.easting = atlTruthData.easting[filter_data]\n atlTruthData.lat = atlTruthData.lat[filter_data]\n atlTruthData.lon = atlTruthData.lon[filter_data]\n atlTruthData.z = atlTruthData.z[filter_data]\n atlTruthData.intensity = atlTruthData.intensity[filter_data]\n atlTruthData.classification = atlTruthData.classification[filter_data] \n atlTruthData.time = atlTruthData.time[filter_data] \n atlTruthData.deltaTime = atlTruthData.deltaTime[filter_data] \n if(verbose):\n print(\"Superfilter complete\")\n # endIf\n #Return truthdata\n return atlTruthData, atlMeasuredData\n\ndef getBins(atlMeasuredData_in,atlTruthData_in, binsize, \n measClassification = None, truthClassification = None, \n operation = 'median', matchfilter = True, nanfilter = True):\n \n if measClassification:\n classlist = np.array(measClassification)\n classfilter = np.isin(atlMeasuredData_in.classification, classlist)\n 
measuredAlongTrack = atlMeasuredData_in.crossTrack[classfilter]\n measuredCrossTrack = atlMeasuredData_in.alongTrack[classfilter]\n measuredVal = atlMeasuredData_in.z[classfilter]\n \n else:\n measuredAlongTrack = atlMeasuredData_in.crossTrack\n measuredCrossTrack = atlMeasuredData_in.alongTrack\n measuredVal = atlMeasuredData_in.z \n \n if truthClassification:\n classlistTruth = np.array(truthClassification)\n classfilterTruth = np.isin(atlTruthData_in.classification, classlistTruth)\n truthAlongTrack = atlTruthData_in.crossTrack[classfilterTruth]\n truthCrossTrack = atlTruthData_in.alongTrack[classfilterTruth]\n truthVal = atlTruthData_in.z[classfilterTruth]\n else:\n truthAlongTrack = atlTruthData_in.crossTrack\n truthCrossTrack = atlTruthData_in.alongTrack\n truthVal = atlTruthData_in.z\n \n #Rasterize Measured and Truth\n try:\n binMeasured = getRaster(measuredAlongTrack[:,0], measuredCrossTrack[:,0], \n measuredVal[:,0], [100000,binsize], str(operation), \n fillValue = -999, time = [])\n except:\n binMeasured = getRaster(measuredAlongTrack, measuredCrossTrack, \n measuredVal, [100000,binsize], str(operation), \n fillValue = -999, time = [])\n try: \n binTruth = getRaster(truthAlongTrack[:,0], truthCrossTrack[:,0], \n truthVal[:,0], [100000,binsize], str(operation), \n fillValue = -999, time = [])\n except:\n binTruth = getRaster(truthAlongTrack, truthCrossTrack, \n truthVal, [100000,binsize], str(operation), \n fillValue = -999, time = [])\n if matchfilter == True:\n idx_atl03 = np.isin(binMeasured.y, binTruth.y)\n idx_truth = np.isin(binTruth.y, binMeasured.y)\n \n binMeasured.x = binMeasured.x[idx_atl03]\n binMeasured.y = binMeasured.y[idx_atl03]\n binMeasured.grid = binMeasured.grid[idx_atl03]\n \n binTruth.x = binTruth.x[idx_truth]\n binTruth.y = binTruth.y[idx_truth]\n binTruth.grid = binTruth.grid[idx_truth]\n \n if nanfilter == True:\n nan_filter = np.where((binMeasured.grid > -998) & (binTruth.grid > -998) & \n (binMeasured.grid < 9000) & (binTruth.grid < 9000))\n \n binMeasured.x = binMeasured.x[nan_filter] \n binMeasured.y = binMeasured.y[nan_filter]\n binMeasured.grid = binMeasured.grid[nan_filter]\n \n binTruth.x = binTruth.x[nan_filter]\n binTruth.y = binTruth.y[nan_filter]\n binTruth.grid = binTruth.grid[nan_filter]\n return binTruth, binMeasured\n\ndef createScalogram(atlMeasured, altTruth, measClassification = [1],\n truthClassification = [2], scalelist = []):\n superTruth, sortedMeasured = superFilter(atlMeasured, altTruth, xBuf = 7, \n classCode = measClassification)\n\n if len(scalelist) == 0:\n scalelist = [5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100]\n minbin = 5\n count = 0\n for scale in scalelist:\n print(str(scale))\n binTruth, binMeasured = getBins(sortedMeasured,superTruth, scale, \n measClassification, truthClassification = [2], \n operation = 'median', matchfilter = True, nanfilter = False)\n \n\n binTruth.grid[binTruth.grid <= -999] = np.nan\n binMeasured.grid[binMeasured.grid <= -999] = np.nan\n me = binTruth.grid - binMeasured.grid\n \n \n repeat = scale/minbin\n \n me = np.repeat(me,[repeat],axis=0)\n if count == 0:\n lencap = np.int(len(me))\n scalogram = me\n else:\n me = me[0:lencap]\n print(me.shape)\n print(scalogram.shape)\n scalogram = np.vstack((scalogram,me))\n\n count = count + 1\n\n return scalogram\n\n\n# Moving mean function\ndef getMovingMean(x, nBack, nForward):\n \n # Get length of input array\n N = len(x)\n \n # Get starting/ending input array values\n firstVal = x[0]\n lastVal = x[-1]\n \n # Get window size\n 
windowSize = nBack + nForward + 1\n \n # Get starting/ending arrays to concatenate onto input array\n xStart = firstVal*np.ones((nBack + 1), dtype = 'int')\n xEnd = lastVal*np.ones(nForward, dtype = 'int')\n \n # Concatenate starting/ending arrays onto input array\n xNew = np.concatenate((xStart, x, xEnd))\n \n # Get cumulative sum of input array\n cumsum = np.cumsum(xNew)\n \n # Get back/forward sums\n backSum = cumsum[:N]\n forwardSum = cumsum[-N:]\n \n # Compute moving average\n movingMean = (forwardSum - backSum)/windowSize\n \n return movingMean\n\n# endDef\n \ndef calculateangle(x1,x2,y1,y2):\n if (x2 - x1) == 0:\n slope = np.inf\n else:\n slope = (y2 - y1)/(x2 - x1)\n degree = np.rad2deg(np.arctan(slope))\n return degree\n\ndef calculategrounddirection(xx,yy):\n degree = np.zeros(len(xx))\n for i in range(0,len(xx)):\n if i == 0:\n degree[i] = calculateangle(xx[i], xx[i+1], yy[i], yy[i+1])\n elif i == (len(xx))-1:\n degree[i] = calculateangle(xx[i-1], xx[i], yy[i-1], yy[i])\n else:\n degree[i] = calculateangle(xx[i-1], xx[i+1], yy[i-1], yy[i+1])\n return degree\n \ndef rotatepoint(degree,xpos,ypos):\n angle = np.deg2rad(degree)\n xrot = (xpos * np.cos(angle)) - (ypos * np.sin(angle)) \n yrot = (xpos * np.sin(angle)) + (ypos * np.cos(angle))\n return xrot, yrot\n\ndef calculatecorners(degree,xcenter,ycenter,width,height):\n # Set corner values\n xul = -width / 2\n yul = height / 2\n xur = width / 2\n yur = height / 2\n xll = -width / 2\n yll = -height / 2\n xlr = width / 2\n ylr = -height / 2\n \n # Rotate based on the angle degree\n xul, yul = rotatepoint((degree-90),xul,yul)\n xur, yur = rotatepoint((degree-90),xur,yur)\n xll, yll = rotatepoint((degree-90),xll,yll)\n xlr, ylr = rotatepoint((degree-90),xlr,ylr)\n \n # Add corner values to centeroid\n xul = xcenter + xul\n yul = ycenter + yul\n xur = xcenter + xur\n yur = ycenter + yur\n xll = xcenter + xll\n yll = ycenter + yll\n xlr = xcenter + xlr\n ylr = ycenter + ylr\n \n return xul, yul, xur, yur, xll, yll, xlr, ylr\n\ndef createShapefiles(xx, yy, width, height, epsg, outfile = \"atl08.shp\"):\n # Generate list of degrees\n degreelist = calculategrounddirection(xx,yy)\n \n # Define Esri Shapefile output\n driver = ogr.GetDriverByName('Esri Shapefile')\n \n # Name output shape file (foo.shp)\n ds = driver.CreateDataSource(outfile)\n \n # Define spatial reference based on EPSG code \n # https://spatialreference.org/ref/epsg/\n srs = ogr.osr.SpatialReference()\n srs.ImportFromEPSG(epsg)\n \n # Create file with srs\n layer = ds.CreateLayer('', srs, ogr.wkbPolygon)\n \n # Create arbitary id field\n layer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))\n defn = layer.GetLayerDefn()\n \n # Create a new feature (attribute and geometry)\n for i in range(0,len(xx)):\n # Generate the corner points\n xul, yul, xur, yur, xll, yll, xlr, ylr = \\\n calculatecorners(degreelist[i],xx[i],yy[i],width,height) \n \n # Create rectangle corners\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(xul, yul)\n ring.AddPoint(xur, yur)\n ring.AddPoint(xlr, ylr)\n ring.AddPoint(xll, yll)\n ring.AddPoint(xul, yul)\n \n # Create polygon from corners\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n \n # Export well-known binary\n wkb = poly.ExportToWkb()\n \n # Assign arbitary number to field ID\n feat = ogr.Feature(defn)\n feat.SetField('id', i)\n \n # Make a geometry, from Shapely object\n geom = ogr.CreateGeometryFromWkb(wkb)\n feat.SetGeometry(geom)\n \n # Write out geometry\n layer.CreateFeature(feat)\n \n # Remove ring and poly\n 
ring = poly = None\n \n # Remove feat and geom\n feat = geom = None\n \n # Save and close everything\n ds = layer = feat = geom = None \n\ndef pause(msg='enter to continue'):\n input(msg)\n\ndef mode(arr):\n from scipy.stats import mode as mode_func\n val = mode_func(arr)[0][0]\n return val\n\ndef sort_first(arr):\n \"\"\"\n example: sort by x\n x_sort, y_sort, z_sort = cm.sort_first([x, y, z])\n \"\"\"\n s = np.transpose(sorted(np.transpose(arr), key=lambda x: x[0]))\n return s\n\ndef remove_numpy_printing():\n np.set_printoptions(suppress=True)\n\ndef array(*argv):\n \"\"\"\n Turns this:\n x, y, z = np.array(x), np.array(y), np.array(z)\n into\n import icesatUtils as it\n x, y, z = it.array(x, y, z)\n \"\"\"\n var_out = []\n for var in argv:\n var_out.append(np.array(var))\n return var_out\n\ndef meshgrid_to_xy(xx, yy, zz=None):\n \"\"\"\n Example:\n import icesatUtils\n import numpy as np\n x, y = np.linspace(0,1), np.linspace(0,1)\n xx, yy = np.meshgrid(x, y)\n xr, yr = icesatUtils.meshgrid_to_xy(xx, yy)\n \"\"\"\n if type(zz) == type(None):\n return xx.flatten(), yy.flatten()\n else:\n return xx.flatten(), yy.flatten(), zz.flatten()\n\n\ndef get_date(*args, debug=0):\n \n \"\"\"\n Written by Michael James\n\n Example:\n import icesatUtils\n doy = icesatUtils.get_date(year, month, day)\n # or\n month, day = icesatUtils.get_date(year, doy)\n\n \"\"\"\n def help():\n print(\"\"\"\n Example:\n import icesatUtils\n doy = icesatUtils.get_date(year, month, day)\n # or\n month, day = icesatUtils.get_date(year, doy)\n \"\"\")\n\n import datetime\n\n if len(args) == 2:\n y = int(args[0]) #int(sys.argv[1])\n d = int(args[1]) #int(sys.argv[2])\n yp1 = datetime.datetime(y+1, 1, 1)\n y_last_day = yp1 - datetime.timedelta(days=1)\n if d <= y_last_day.timetuple().tm_yday:\n date = datetime.datetime(y, 1, 1) + datetime.timedelta(days=d-1)\n return date.month, date.day\n else:\n print(\"error\")\n help()\n\n elif len(args) == 3:\n y, m, d = args\n date = datetime.datetime(y, m, d)\n doy = int(date.timetuple().tm_yday)\n # print(\"doy = {}\".format(date.timetuple().tm_yday))\n return str(doy).zfill(3)\n\n else:\n print(\"error: incorrect number of args\")\n help()\n\n\n\ndef get_h5_meta(h5_file, meta='date', rtn_doy=False, rtn_hms=True, file_start='ATL'): #, debug=0):\n # ATL03_20181016000635_02650109_200_01.h5\n \"\"\"\n This function gets metadata directly from the ATL filename.\n\n Input:\n h5_file - the ATL file, full-path or not\n meta - the type of metadata to output\n ['date', 'track', 'release', 'version', \n 'f_type', 'hms', 'cycle']\n f_type - either rapid or final\n rtn_doy - return day of year or not, if date is chosen\n rtn_hms - return hour/min/sec, or time in sec\n file_start - search for info based on file_start index;\n useful if given an ATL file that starts\n with \"errorprocessing_...\" or any other\n prefix\n debug - for small errors\n\n Output:\n varies, but generally it's one value, unless 'hms' meta is chosen,\n in which case it is two values.\n\n Example:\n import icesatUtils\n fn = DIR + '/ATL03_20181016000635_02650109_200_01.h5'\n # or fn = 'ATL03_20181016000635_02650109_200_01.h5'\n year, day_of_year = icesatUtils.get_h5_meta(fn, meta='date', rtn_doy=True)\n version = icesatUtils.get_h5_meta(fn, meta='version')\n release = icesatUtils.get_h5_meta(fn, meta='release')\n \"\"\"\n\n h5_file = os.path.basename(h5_file) # h5_file.split('/')[-1]\n\n meta = meta.lower()\n\n i0 = 0\n try:\n i0 = h5_file.index(file_start)\n except ValueError:\n print('warning: substring %s not found in 
%s' % (file_start, h5_file))\n # i0 = check_file_start(h5_file, file_start, debug)\n\n if meta == 'date':\n year = int(h5_file[i0+6:i0+10])\n month = int(h5_file[i0+10:i0+12])\n day = int(h5_file[i0+12:i0+14])\n\n if rtn_doy:\n doy0 = get_date(year, month, day)\n return str(year), str(doy0).zfill(3)\n\n return str(year), str(month).zfill(2), str(day).zfill(2)\n\n elif meta == 'track':\n return int(h5_file[i0+21:i0+25])\n\n elif meta == 'release':\n r = h5_file[i0+30:i0+34]\n if '_' in r:\n r = h5_file[i0+30:i0+33]\n return r\n\n elif meta == 'version':\n v = h5_file[i0+34:i0+36]\n if '_' in v:\n v = h5_file[i0+35:i0+37]\n return v\n\n elif meta == 'f_type':\n r = h5_file[i0+30:i0+34]\n f_type = 'rapid'\n if '_' in r:\n r = h5_file[i0+30:i0+33]\n f_type = 'final'\n return f_type\n\n elif meta == 'hms':\n hms = h5_file[i0+14:i0+20]\n h, m, s = hms[0:2], hms[2:4], hms[4:6]\n if rtn_hms:\n return h, m, s\n else:\n h, m, s = int(h), int(m), int(s)\n t0, t0_full = t2t(h,m,s)\n return t0, t0_full\n\n elif meta == 'cycle':\n return int(h5_file[i0+25:i0+29])\n\n else:\n print('error: unknown meta=%s' % meta)\n return 0\n\ndef get_index(t, t0, tol=2.0, err_msg=True):\n \"\"\"\n Gets closest index of t0 in array t, i.e. the index of time t0\n in time array t\n\n tol is used in case t0 is not found; default is tol = 2.0 sec\n \"\"\"\n if type(t) != np.ndarray:\n t = np.array(t)\n index = np.argmin(abs(t - t0))\n if abs(t[index] - t0) > tol:\n if err_msg:\n print('warning: index not found: looking for %4.2f vs. found %4.2f' % (t0, t[index]))\n # return -1\n return index\n\n\ndef t2t(hours, mins=None, sec=None):\n \"\"\"\n This function changes hms into seconds, or \n HH:MM:SSSSSS format, such as in GPS time-stamps.\n \"\"\"\n if mins==None and sec== None:\n tot_sec = hours%86400\n choice = 1\n elif hours != None and mins != None and sec != None:\n choice = 2\n if hours > 24 or mins > 60 or sec > 60 or hours < 0 or mins < 0 or sec < 0:\n raise ValueError(\"Value out of range\")\n if choice == 1:\n hours = int(tot_sec/3600)\n mins = int((tot_sec%3600)/(60))\n sec = ((tot_sec%3600)%(60))\n\n if choice == 2:\n tot_sec = hours*3600 + mins*60 + sec\n\n return [tot_sec, \"{:>02d}:{:>02d}:{:>09.6f}\".format(hours, mins, sec)]\n\n\ndef get_outlier_data(data):\n \"\"\"\n This function returns simple Q1,Q2,Q3,IQR. 
It uses\n nanpercentile and nanmedian, so nans are removed/handled.\n \"\"\"\n Q1,Q2,Q3 = np.nanpercentile(data,25), np.nanmedian(data), np.nanpercentile(data,75)\n IQR = Q3-Q1\n return Q1,Q2,Q3,IQR\n\ndef get_root_dir(dir_type=None, username=None, debug=0):\n\n \"\"\"\n This function outputs directories relative to GLAM operatives.\n\n Input:\n dir_type - either 'data' or 'user', optional\n username - the function will attempt to detect the username\n of the person that called it, but if you'd like\n to manually specify, the option is there\n debug - optionally prints out the username the function detects\n\n Output:\n if dir_type is None:\n returns [data, user] root directories, relative to user\n elif dir_type is 'data':\n returns only data\n elif dir_type is 'user':\n returns only user\n\n Example:\n import icesatUtils as iu\n USER_ROOT, DATA_ROOT = iu.get_root_dir()\n USER_JSIPPS = iu.get_root_dir(dir_type='user', username='jsipps')\n\n \"\"\"\n\n dirs = {'jsipps': ['/LIDAR/server/poseidon_files/USERS/jsipps', '/bigtex_data'],\n 'malonzo': ['N:\\\\USERS\\\\mike', 'Z:'],\n 'eguenther': ['/LIDAR/server/USERS/eric', '/laserpewpew'],\n 'jmarkel': ['/LIDAR/server/USERS/jmarkel', '/laserpewpew'],\n 'hleigh': ['L:\\\\USERS\\\\holly', 'N:']}\n\n # get or format username\n import pwd\n if type(username) == type(None):\n username = pwd.getpwuid(os.getuid()).pw_name\n if debug:\n print('username: %s' % username)\n username = username.lower()\n\n # get or format directory type\n # user or data\n dir_types = {'user': 0, 'data': 1}\n if type(dir_type) != type(None):\n if type(dir_type) != str:\n dir_type = str(dir_type)\n dir_type = dir_type.lower()\n if dir_type in dir_types:\n i = dir_types[dir_type]\n else:\n keys = [key for key in dir_types.keys()]\n raise KeyError(dir_type + ' directory type unknown. Try one of', keys)\n\n # return directory\n if username in dirs:\n if type(dir_type) == type(None):\n return dirs[username]\n else:\n return dirs[username][i]\n\n else:\n keys = [key for key in dirs.keys()]\n raise KeyError('username ' + username + ' not found in', keys)\n\ndef reload(module):\n import imp\n imp.reload(module)\n\n\ndef get_sc_orient(file, delta_time=None):\n \"\"\"\n Takes in any h5 file, but 03 and 08 should have\n the orbit_info group. 
If not, this function raises\n a KeyError.\n\n Outputs an array of sc_orient values, dependent on\n delta_time, unless delta_time is not specified, in\n which case it just outputs the sc_orient and sc_orient_time\n datasets, which are typically one element in length.\n \"\"\"\n\n INT_MAX = np.iinfo(int).max\n group = 'orbit_info'\n datasets = [group + '/sc_orient', group + '/sc_orient_time']\n\n found = False\n with h5py.File(file, 'r') as fp:\n if datasets[0] in fp and datasets[1] in fp:\n sc_orient = np.array(fp[datasets[0]]).astype(int)\n sc_orient_time = np.array(fp[datasets[1]])\n found = True\n\n if found:\n if type(delta_time) != type(None):\n n = len(delta_time)\n sc_orient_arr = np.full(n, INT_MAX)\n for k, t0 in enumerate(sc_orient_time):\n b = delta_time >= t0\n sc_orient_arr[b] = sc_orient[k]\n\n return sc_orient_arr\n\n else:\n return sc_orient, sc_orient_time\n\n else:\n raise KeyError('could not find [%s or %s]' % (datasets[0], datasets[1]))\n\n\ndef get_beam_info(sc_orient_arr, gt):\n \"\"\"\n Takes as input the sc_orient at every sample\n and a ground track; outputs what beam number\n and beam type should be at these samples.\n\n beam_number of -1 is assigned if sc_orient \n value is unknown, such as 2 (during yaw-flip)\n and any other value.\n\n beam_type is set to unknown is sc_orient\n value is unknown.\n \"\"\"\n\n n = len(sc_orient_arr)\n s0 = sc_orient_arr == 0\n s1 = sc_orient_arr == 1\n\n beam_type = np.full(n,'unknown')\n beam_number = np.full(n,-1)\n if gt == 'gt1r':\n beam_number[s0] = 2\n beam_number[s1] = 1\n beam_type[s0] = 'weak'\n beam_type[s1] = 'strong'\n\n elif gt == 'gt2r':\n beam_number[s0] = 4\n beam_number[s1] = 3\n beam_type[s0] = 'weak'\n beam_type[s1] = 'strong'\n\n elif gt == 'gt3r':\n beam_number[s0] = 6\n beam_number[s1] = 5\n beam_type[s0] = 'weak'\n beam_type[s1] = 'strong'\n\n elif gt == 'gt1l':\n beam_number[s0] = 1\n beam_number[s1] = 2\n beam_type[s0] = 'strong'\n beam_type[s1] = 'weak'\n\n elif gt == 'gt2l':\n beam_number[s0] = 3\n beam_number[s1] = 4\n beam_type[s0] = 'strong'\n beam_type[s1] = 'weak'\n\n elif gt == 'gt3l':\n beam_number[s0] = 5\n beam_number[s1] = 6\n beam_type[s0] = 'strong'\n beam_type[s1] = 'weak'\n\n return beam_number, beam_type\n\n\ndef calc_rdm_segment(t, c, segment_id_beg, segment_id_end, segment_id, ph_index_beg, segment_ph_cnt, debug=0):\n\n \"\"\"\n Function to calculate radiometry (rdm)\n\n Input:\n t - time or delta_time of ATL03, for a given gt num\n c - classification of ATL03 for a given gt num\n ensure that no nans exist\n segment_id_beg - segment_id_beg from ATL08\n segment_id_end - segment_id_end from ATL08\n segment_id - segment_id from ATL03 geolocation/\n ph_index_beg - ph_index_beg from ATL03 geolocation/\n segment_ph_cnt - segment_ph_cnt from ATL03 geolocation/\n debug - val != 0 enables print statements if segments\n do not match from 03 to 08 (see caveats)\n\n Output:\n n_shots_unique - total number of unique ttg per ATL08 100m bin\n rdm_ground - rdm of ground photons (c==1)\n rdm_veg - rdm of veg photons (c==2)\n rdm_canopy - rdm of canopy photons (c==3)\n\n Example:\n n_shots_unique, rdm_ground, rdm_veg, rdm_canopy = \\\n calc_rdm(t, c, segment_id_beg, segment_id_end, ph_index_beg, segment_ph_cnt, debug=0)\n\n Caveats:\n Ensure that no nans exist in classification c\n\n rdm_ground/veg/canopy and n_shots_unique are floating point\n b/c it's possible to have no overlap in 03 and 08 data, in\n which case the radiometry value is NaN; this is implemented by\n initializing rdm vectors are NaN. 
Thus, they are floating-point-\n valued.\n\n This functions can handle when 03/08 do not totally overlap,\n or when there is no overlap. That said, one should proceed with\n caution knowing 03 and 08 do not overlap at all. NaN values are\n initialized in rdm vectors based on these cases.\n\n \"\"\"\n\n if np.isnan(c).sum() > 0 and debug:\n print('warning: NaN values found in c')\n\n rdm_ground = np.full(segment_id_beg.shape, np.nan)\n rdm_veg = np.full(segment_id_beg.shape, np.nan)\n rdm_canopy = np.full(segment_id_beg.shape, np.nan)\n n_shots_unique = np.full(segment_id_beg.shape, np.nan)\n \n n_id = len(segment_id)\n for s in range(len(segment_id_beg)):\n# _, k0 = iu.getClosest(segment_id, [segment_id_beg[s]])\n# _, k1 = iu.getClosest(segment_id, [segment_id_end[s]])\n _, k0 = getClosest(segment_id, [segment_id_beg[s]])\n _, k1 = getClosest(segment_id, [segment_id_end[s]])\n k0, k1 = int(k0), int(k1)\n \n warn = False\n b_edge = False\n if segment_id[k0] < segment_id_beg[s]:\n # left side incomplete\n # cm.pause('beg')\n k = k0\n while segment_id[k] < segment_id_beg[s]:\n k += 1\n if k >= n_id:\n b_edge = True\n break\n\n elif segment_id[k0] > segment_id_beg[s]:\n # print('warning: 03 seg id beg %d > 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))\n warn = True\n\n # else:\n # equal, totally fine\n\n # if segment_id[k1] != segment_id_end[s]:\n if segment_id[k1] > segment_id_end[s]:\n # right side incomplete\n # cm.pause('end')\n k = k1\n while segment_id[k] > segment_id_end[s]:\n k -= 1\n if k < 0:\n b_edge = True\n break\n\n elif segment_id[k1] < segment_id_end[s]:\n # print('warning: 03 seg id beg %d < 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))\n warn = True\n\n # else:\n # equal, totally fine\n\n if b_edge and debug:\n # 08 segment is entirely outside of 03 segment data\n print('outside')\n print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))\n print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))\n # cm.pause()\n input('enter to continue')\n continue\n\n if warn and debug:\n print('partial')\n print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))\n print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))\n # cm.pause()\n input('enter to continue')\n\n i0, i1 = ph_index_beg[k0], ph_index_beg[k1] + segment_ph_cnt[k1] - 1\n\n t_seg = t[i0:i1+1] # inclusive index\n c_seg = c[i0:i1+1]\n\n n_shots_total_uq = len(np.unique(t_seg))\n n_shots_ground = (c_seg == 1).sum()\n n_shots_veg = (c_seg == 2).sum()\n n_shots_canopy = (c_seg == 3).sum()\n\n n_shots_unique[s] = n_shots_total_uq\n rdm_ground[s] = float(n_shots_ground / n_shots_total_uq)\n rdm_veg[s] = float(n_shots_veg / n_shots_total_uq)\n rdm_canopy[s] = float(n_shots_canopy / n_shots_total_uq)\n\n return n_shots_unique, rdm_ground, rdm_veg, rdm_canopy\n\n\ndef unit(v):\n mag = np.linalg.norm(v)\n return np.array([vi/mag for vi in v])\n\ndef df_to_np(df, *args):\n rtn = []\n for var in args:\n rtn.append(np.array(df[var]))\n return rtn\n\ndef downsample(Fs_ds, t, *data):\n \"\"\"\n Input:\n Fs_ds - downsampled new frequency\n t - time-series\n *data - one or multiple args to be\n downsampled along with t\n\n Output:\n t_ds - downsampled time-series\n n_ds - len(t_ds)\n data_out - one or multiple args\n\n Example:\n dt_ds = 1.0 # sec\n Fs_ds = 1.0 / dt_ds\n t_ds, n_ds, (arg1, arg2, ...) 
= downsample(Fs_ds, t, arg1, arg2, ...)\n\n \"\"\"\n\n dt_ds = 1.0 / Fs_ds # sec\n tol = 0.01*dt_ds\n t_ds = []\n # data_ds = []\n data_new = []\n num_y = len(data)\n for y in data:\n data_new.append([])\n\n n = len(t)\n t_prev = t[0]\n for j in range(1,n):\n if abs(t[j] - t_prev) > dt_ds-tol:\n t_ds.append(t[j])\n for i in range(num_y):\n data_new[i].append(data[i][j])\n t_prev = t[j]\n\n n_ds = len(t_ds)\n\n # if rtn_numpy:\n if 1:\n t_ds = np.array(t_ds)\n for i in range(num_y):\n data_new[i] = np.array(data_new[i])\n\n data_out = tuple(data_new)\n\n if len(data_out) > 1:\n return t_ds, n_ds, data_out\n else:\n return t_ds, n_ds, data_out[0]\n\n\ndef b_filt(b, *args):\n arr_new = []\n for arr in args:\n arr_new.append(arr[b])\n return arr_new\n# endDef\n \n# Function to interpolate 1d\ndef interp_vals(input_x, input_y, interp_x, removeThresh=False):\n \n # Remove y values > 1e30\n if(removeThresh):\n indsUnderThresh = input_y <= 1e30\n input_x = input_x[indsUnderThresh]\n input_y = input_y[indsUnderThresh]\n # endIf\n \n # Remove x duplicate values for scipy interp\n indsUnique = np.unique(input_x, return_index=True)[1]\n input_x = input_x[indsUnique]\n input_y = input_y[indsUnique]\n \n # Interpolate delta_time\n f1 = interpolate.interp1d(input_x, input_y, kind='linear', fill_value='extrapolate')\n interp_y = f1(interp_x)\n \n return interp_y\n\n# endDef\n\n# Function to interpolate 2d\ndef interp_vals2d(input_x, input_y, input_z, interp_x, interp_y, removeThresh=False, removeDuplicates=False):\n \n # Remove y values > 1e30\n if(removeThresh):\n indsUnderThresh = input_y <= removeThresh\n input_x = input_x[indsUnderThresh]\n input_y = input_y[indsUnderThresh]\n # endIf\n \n # Remove x duplicate values for scipy interp\n if(removeDuplicates):\n indsUnique = np.unique(input_x, return_index=True)[1]\n input_x = input_x[indsUnique]\n input_y = input_y[indsUnique]\n # endIf\n \n # Interpolate delta_time\n f1 = interpolate.interp2d(input_x, input_y, input_z, kind='linear', fill_value='extrapolate')\n interp_z = f1(interp_x, interp_y)\n \n return interp_z\n\n# endDef\n\nif __name__ == \"__main__\":\n print(\"Test\")\n# end" ]
[ [ "numpy.matrix", "numpy.nanmedian", "numpy.radians", "numpy.arctan", "numpy.asarray", "scipy.interpolate.dfitpack.bispeu", "numpy.flipud", "numpy.cumsum", "pandas.DataFrame", "numpy.concatenate", "numpy.max", "numpy.arctan2", "numpy.round", "numpy.argmin", "numpy.iinfo", "scipy.interpolate.interp2d", "numpy.where", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.matmul", "numpy.full", "numpy.sin", "numpy.intersect1d", "scipy.interpolate.interp1d", "numpy.column_stack", "numpy.ctypeslib.ndpointer", "numpy.repeat", "numpy.zeros", "numpy.isin", "numpy.min", "numpy.linalg.inv", "numpy.isnan", "numpy.arccos", "numpy.deg2rad", "numpy.char.array", "numpy.floor", "numpy.transpose", "numpy.argsort", "numpy.meshgrid", "numpy.array", "numpy.nanpercentile", "numpy.abs", "numpy.set_printoptions", "numpy.linalg.norm", "numpy.cos", "numpy.ones", "numpy.shape", "scipy.stats.mode", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
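A minimal usage sketch for two of the self-contained helpers in the icesat utilities code entry above (getClosest and getMovingMean). The module name icesatUtils is taken from that file's own docstrings; the sample arrays below are illustrative only, not data from the source.

import numpy as np
import icesatUtils as iu   # module name as used in the file's own docstrings

t = np.arange(0.0, 10.0, 0.1)                 # evenly sampled axis
vals, inds = iu.getClosest(t, [2.04, 7.49])   # nearest samples and their indices
smooth = iu.getMovingMean(np.sin(t), nBack=5, nForward=5)  # centred moving mean
print(vals, inds, smooth.shape)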
f1amingo/logparser
[ "65f077a78a974a50e0fff792257fb6fea0a86821" ]
[ "logparser/IPLoM/IPLoM.py" ]
[ "\"\"\"\r\nDescription : This file implements the IPLoM algorithm for log parsing\r\nAuthor : LogPAI team\r\nLicense : MIT\r\n\"\"\"\r\n\r\nimport copy\r\nimport sys\r\nfrom datetime import datetime\r\nimport os\r\nimport gc\r\nimport re\r\nimport pandas as pd\r\nimport hashlib\r\nimport string\r\n\r\n\r\nclass Partition:\r\n \"\"\" Wrap around the logs and the step number\r\n \"\"\"\r\n\r\n def __init__(self, stepNo, numOfLogs=0, lenOfLogs=0):\r\n self.logLL = []\r\n self.stepNo = stepNo\r\n self.valid = True\r\n self.numOfLogs = numOfLogs\r\n self.lenOfLogs = lenOfLogs\r\n\r\n\r\nclass Event:\r\n def __init__(self, eventStr):\r\n self.eventStr = eventStr\r\n self.eventId = hashlib.md5(' '.join(eventStr).encode('utf-8')).hexdigest()[0:8]\r\n self.eventCount = 0\r\n\r\n\r\nclass Para:\r\n \"\"\" Para class\r\n\r\n Attributes\r\n ----------\r\n maxEventLen : the length of the longest log/event, which is used in step 1 to split logs into partitions \r\n according to their length\r\n path : the path of the input file\r\n step2Support : the support threshold to create a new partition, partitions which contains less than \r\n step2Support logs will not go through step 2\r\n PST : Partition support ratio threshold\r\n CT : Cluster goodness threshold used in DetermineP1P2 in step3. If the columns with unique term more \r\n than CT, we skip step 3\r\n \"\"\"\r\n\r\n def __init__(self, log_format, indir, outdir, maxEventLen, step2Support, PST, CT, lowerBound,\r\n upperBound, rex):\r\n self.maxEventLen = maxEventLen\r\n self.path = indir\r\n self.savePath = outdir\r\n self.step2Support = step2Support\r\n self.PST = PST\r\n self.CT = CT\r\n self.lowerBound = lowerBound\r\n self.upperBound = upperBound\r\n self.rex = rex\r\n self.logformat = log_format\r\n\r\n\r\nclass LogParser:\r\n def __init__(self, log_format, indir='../logs/', outdir='./result/',\r\n maxEventLen=500, step2Support=0, PST=0,\r\n CT=0.35, lowerBound=0.25, upperBound=0.9,\r\n rex=[], keep_para=True):\r\n\r\n self.para = Para(log_format=log_format, indir=indir, outdir=outdir, maxEventLen=maxEventLen,\r\n step2Support=step2Support,\r\n PST=PST, CT=CT, lowerBound=lowerBound, upperBound=upperBound, rex=rex)\r\n self.partitionsL = []\r\n self.eventsL = []\r\n self.output = []\r\n self.keep_para = keep_para\r\n\r\n if not os.path.exists(self.para.savePath):\r\n os.makedirs(self.para.savePath)\r\n\r\n # Initialize some partitions which contain logs with different length\r\n for logLen in range(self.para.maxEventLen + 1):\r\n self.partitionsL.append(Partition(stepNo=1, numOfLogs=0, lenOfLogs=logLen))\r\n\r\n def parse(self, logname):\r\n print('Parsing file: ' + os.path.join(self.para.path, logname))\r\n self.logname = logname\r\n starttime = datetime.now()\r\n self.Step1()\r\n self.Step2()\r\n self.Step3()\r\n self.Step4()\r\n self.getOutput()\r\n self.WriteEventToFile()\r\n\r\n time_elapsed = datetime.now() - starttime\r\n print('Parsing done. 
[Time taken: {!s}]'.format(time_elapsed))\r\n return time_elapsed\r\n\r\n def Step1(self):\r\n headers, regex = self.generate_logformat_regex(self.para.logformat)\r\n self.df_log = self.log_to_dataframe(os.path.join(self.para.path, self.logname), regex, headers,\r\n self.para.logformat)\r\n lineCount = 1\r\n for idx, line in self.df_log.iterrows():\r\n line = line['Content']\r\n # If line is empty, skip\r\n if line.strip() == \"\":\r\n continue\r\n\r\n if self.para.rex:\r\n for currentRex in self.para.rex:\r\n line = re.sub(currentRex, '', line)\r\n\r\n wordSeq = list(filter(lambda x: x != '', re.split(r'[\\s=:,]', line)))\r\n if not wordSeq:\r\n wordSeq = [' ']\r\n\r\n # Generate terms list, with ID in the end\r\n wordSeq.append(str(lineCount))\r\n lineCount += 1\r\n\r\n try:\r\n # Add current log to the corresponding partition\r\n self.partitionsL[len(wordSeq) - 1].logLL.append(wordSeq)\r\n self.partitionsL[len(wordSeq) - 1].numOfLogs += 1\r\n except:\r\n print(wordSeq)\r\n\r\n for partition in self.partitionsL:\r\n if partition.numOfLogs == 0:\r\n partition.valid = False\r\n\r\n elif self.para.PST != 0 and 1.0 * partition.numOfLogs / lineCount < self.para.PST:\r\n for logL in partition.logLL:\r\n self.partitionsL[0].logLL.append(logL)\r\n self.partitionsL[0].numOfLogs += 1\r\n partition.valid = False\r\n\r\n def Step2(self):\r\n\r\n for partition in self.partitionsL:\r\n\r\n if not partition.valid:\r\n continue\r\n\r\n if partition.numOfLogs <= self.para.step2Support:\r\n continue\r\n\r\n # Avoid going through newly generated partitions\r\n if partition.stepNo == 2:\r\n break\r\n\r\n # For each column, create a set to hold the unique tokens in that column. \r\n # And finally, calculate the number of the unique tokens in each column\r\n uniqueTokensCountLS = []\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS.append(set())\r\n\r\n for logL in partition.logLL:\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS[columnIdx].add(logL[columnIdx])\r\n\r\n # Find the column with minimum unique tokens\r\n minColumnIdx = 0\r\n minColumnCount = len(uniqueTokensCountLS[0])\r\n\r\n for columnIdx in range(partition.lenOfLogs):\r\n if minColumnCount > len(uniqueTokensCountLS[columnIdx]):\r\n minColumnCount = len(uniqueTokensCountLS[columnIdx])\r\n minColumnIdx = columnIdx\r\n\r\n # If there is one column with one unique term, do not split this partition\r\n if minColumnCount == 1:\r\n continue\r\n\r\n # From split-token to log list\r\n logDLL = {}\r\n for logL in partition.logLL:\r\n if logL[minColumnIdx] not in logDLL:\r\n logDLL[logL[minColumnIdx]] = []\r\n logDLL[logL[minColumnIdx]].append(logL)\r\n\r\n for key in logDLL:\r\n if self.para.PST != 0 and 1.0 * len(logDLL[key]) / partition.numOfLogs < self.para.PST:\r\n self.partitionsL[0].logLL += logDLL[key]\r\n self.partitionsL[0].numOfLogs += len(logDLL[key])\r\n else:\r\n newPartition = Partition(stepNo=2, numOfLogs=len(logDLL[key]), lenOfLogs=partition.lenOfLogs)\r\n newPartition.logLL = logDLL[key]\r\n self.partitionsL.append(newPartition)\r\n\r\n partition.valid = False\r\n\r\n def Step3(self):\r\n\r\n for partition in self.partitionsL:\r\n\r\n if not partition.valid:\r\n continue\r\n\r\n if partition.stepNo == 3:\r\n break\r\n\r\n # Find two columns that my cause split in this step\r\n p1, p2 = self.DetermineP1P2(partition)\r\n\r\n if p1 == -1 or p2 == -1:\r\n continue\r\n\r\n try:\r\n\r\n p1Set = set()\r\n p2Set = set()\r\n mapRelation1DS = {}\r\n mapRelation2DS = {}\r\n\r\n # Construct token sets 
for p1 and p2, dictionary to record the mapping relations between p1 and p2\r\n for logL in partition.logLL:\r\n p1Set.add(logL[p1])\r\n p2Set.add(logL[p2])\r\n\r\n if (logL[p1] == logL[p2]):\r\n print(\"Warning: p1 may be equal to p2\")\r\n\r\n if logL[p1] not in mapRelation1DS:\r\n mapRelation1DS[logL[p1]] = set()\r\n mapRelation1DS[logL[p1]].add(logL[p2])\r\n\r\n if logL[p2] not in mapRelation2DS:\r\n mapRelation2DS[logL[p2]] = set()\r\n mapRelation2DS[logL[p2]].add(logL[p1])\r\n\r\n # Construct sets to record the tokens in 1-1, 1-M, M-1 relationships, the left-tokens in p1Set & p2Set \r\n # are in M-M relationships\r\n oneToOneS = set()\r\n oneToMP1D = {}\r\n oneToMP2D = {}\r\n\r\n # select 1-1 and 1-M relationships\r\n for p1Token in p1Set:\r\n if len(mapRelation1DS[p1Token]) == 1:\r\n if len(mapRelation2DS[list(mapRelation1DS[p1Token])[0]]) == 1:\r\n oneToOneS.add(p1Token)\r\n\r\n else:\r\n isOneToM = True\r\n\r\n for p2Token in mapRelation1DS[p1Token]:\r\n if len(mapRelation2DS[p2Token]) != 1:\r\n isOneToM = False\r\n break\r\n if isOneToM:\r\n oneToMP1D[p1Token] = 0\r\n\r\n # delete the tokens which are picked to 1-1 and 1-M relationships from p1Set, so that the left are M-M\r\n for deleteToken in oneToOneS:\r\n p1Set.remove(deleteToken)\r\n p2Set.remove(list(mapRelation1DS[deleteToken])[0])\r\n\r\n for deleteToken in oneToMP1D:\r\n for deleteTokenP2 in mapRelation1DS[deleteToken]:\r\n p2Set.remove(deleteTokenP2)\r\n p1Set.remove(deleteToken)\r\n\r\n # select M-1 relationships\r\n for p2Token in p2Set:\r\n if len(mapRelation2DS[p2Token]) != 1:\r\n isOneToM = True\r\n for p1Token in mapRelation2DS[p2Token]:\r\n if len(mapRelation1DS[p1Token]) != 1:\r\n isOneToM = False\r\n break\r\n if isOneToM:\r\n oneToMP2D[p2Token] = 0\r\n\r\n # delete the tokens which are picked to M-1 relationships from p2Set, so that the left are M-M\r\n for deleteToken in oneToMP2D:\r\n p2Set.remove(deleteToken)\r\n for deleteTokenP1 in mapRelation2DS[deleteToken]:\r\n p1Set.remove(deleteTokenP1)\r\n\r\n # calculate the #Lines_that_match_S\r\n for logL in partition.logLL:\r\n if logL[p1] in oneToMP1D:\r\n oneToMP1D[logL[p1]] += 1\r\n\r\n if logL[p2] in oneToMP2D:\r\n oneToMP2D[logL[p2]] += 1\r\n\r\n except KeyError as er:\r\n print(er)\r\n print('error: ' + str(p1) + '\\t' + str(p2))\r\n\r\n newPartitionsD = {}\r\n if partition.stepNo == 2:\r\n newPartitionsD[\"dumpKeyforMMrelationInStep2__\"] = Partition(stepNo=3, numOfLogs=0,\r\n lenOfLogs=partition.lenOfLogs)\r\n # Split partition\r\n for logL in partition.logLL:\r\n # If is 1-1\r\n if logL[p1] in oneToOneS:\r\n if logL[p1] not in newPartitionsD:\r\n newPartitionsD[logL[p1]] = Partition(stepNo=3, numOfLogs=0, lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p1]].logLL.append(logL)\r\n newPartitionsD[logL[p1]].numOfLogs += 1\r\n\r\n # This part can be improved. 
The split_rank can be calculated once.\r\n # If is 1-M\r\n elif logL[p1] in oneToMP1D:\r\n split_rank = self.Get_Rank_Posistion(len(mapRelation1DS[logL[p1]]), oneToMP1D[logL[p1]], True)\r\n if split_rank == 1:\r\n if logL[p1] not in newPartitionsD:\r\n newPartitionsD[logL[p1]] = Partition(stepNo=3, numOfLogs=0, lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p1]].logLL.append(logL)\r\n newPartitionsD[logL[p1]].numOfLogs += 1\r\n else:\r\n if logL[p2] not in newPartitionsD:\r\n newPartitionsD[logL[p2]] = Partition(stepNo=3, numOfLogs=0, lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p2]].logLL.append(logL)\r\n newPartitionsD[logL[p2]].numOfLogs += 1\r\n\r\n # If is M-1\r\n elif logL[p2] in oneToMP2D:\r\n split_rank = self.Get_Rank_Posistion(len(mapRelation2DS[logL[p2]]), oneToMP2D[logL[p2]], False)\r\n if split_rank == 1:\r\n if logL[p1] not in newPartitionsD:\r\n newPartitionsD[logL[p1]] = Partition(stepNo=3, numOfLogs=0, lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p1]].logLL.append(logL)\r\n newPartitionsD[logL[p1]].numOfLogs += 1\r\n else:\r\n if logL[p2] not in newPartitionsD:\r\n newPartitionsD[logL[p2]] = Partition(stepNo=3, numOfLogs=0, lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p2]].logLL.append(logL)\r\n newPartitionsD[logL[p2]].numOfLogs += 1\r\n\r\n # M-M\r\n else:\r\n if partition.stepNo == 2:\r\n newPartitionsD[\"dumpKeyforMMrelationInStep2__\"].logLL.append(logL)\r\n newPartitionsD[\"dumpKeyforMMrelationInStep2__\"].numOfLogs += 1\r\n else:\r\n if len(p1Set) < len(p2Set):\r\n if logL[p1] not in newPartitionsD:\r\n newPartitionsD[logL[p1]] = Partition(stepNo=3, numOfLogs=0,\r\n lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p1]].logLL.append(logL)\r\n newPartitionsD[logL[p1]].numOfLogs += 1\r\n else:\r\n if logL[p2] not in newPartitionsD:\r\n newPartitionsD[logL[p2]] = Partition(stepNo=3, numOfLogs=0,\r\n lenOfLogs=partition.lenOfLogs)\r\n newPartitionsD[logL[p2]].logLL.append(logL)\r\n newPartitionsD[logL[p2]].numOfLogs += 1\r\n\r\n if \"dumpKeyforMMrelationInStep2__\" in newPartitionsD and newPartitionsD[\r\n \"dumpKeyforMMrelationInStep2__\"].numOfLogs == 0:\r\n newPartitionsD[\"dumpKeyforMMrelationInStep2__\"].valid = False\r\n # Add all the new partitions to collection\r\n for key in newPartitionsD:\r\n if self.para.PST != 0 and 1.0 * newPartitionsD[key].numOfLogs / partition.numOfLogs < self.para.PST:\r\n self.partitionsL[0].logLL += newPartitionsD[key].logLL\r\n self.partitionsL[0].numOfLogs += newPartitionsD[key].numOfLogs\r\n else:\r\n self.partitionsL.append(newPartitionsD[key])\r\n\r\n partition.valid = False\r\n\r\n def Step4(self):\r\n self.partitionsL[0].valid = False\r\n if self.para.PST == 0 and self.partitionsL[0].numOfLogs != 0:\r\n event = Event(['Outlier'])\r\n event.eventCount = self.partitionsL[0].numOfLogs\r\n self.eventsL.append(event)\r\n\r\n for logL in self.partitionsL[0].logLL:\r\n logL.append(str(event.eventId))\r\n\r\n for partition in self.partitionsL:\r\n if not partition.valid:\r\n continue\r\n\r\n if partition.numOfLogs == 0:\r\n print(str(partition.stepNo) + '\\t')\r\n\r\n uniqueTokensCountLS = []\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS.append(set())\r\n\r\n for logL in partition.logLL:\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS[columnIdx].add(logL[columnIdx])\r\n\r\n e = copy.deepcopy(partition.logLL[0])[:partition.lenOfLogs]\r\n\r\n for columnIdx in range(partition.lenOfLogs):\r\n if len(uniqueTokensCountLS[columnIdx]) == 1:\r\n 
continue\r\n else:\r\n e[columnIdx] = '<*>'\r\n\r\n event = Event(e)\r\n event.eventCount = partition.numOfLogs\r\n\r\n self.eventsL.append(event)\r\n\r\n for logL in partition.logLL:\r\n logL.append(str(event.eventId))\r\n\r\n def getOutput(self):\r\n if self.para.PST == 0 and self.partitionsL[0].numOfLogs != 0:\r\n for logL in self.partitionsL[0].logLL:\r\n self.output.append(logL[-2:] + logL[:-2])\r\n i = 0\r\n for partition in self.partitionsL:\r\n if not partition.valid:\r\n continue\r\n for logL in partition.logLL:\r\n self.output.append(logL[-2:] + logL[:-2])\r\n\r\n def WriteEventToFile(self):\r\n eventID_template = {event.eventId: ' '.join(event.eventStr) for event in self.eventsL}\r\n eventList = [[event.eventId, ' '.join(event.eventStr), event.eventCount] for event in self.eventsL]\r\n eventDf = pd.DataFrame(eventList, columns=['EventId', 'EventTemplate', 'Occurrences'])\r\n eventDf.to_csv(os.path.join(self.para.savePath, self.logname + '_templates.csv'), index=False)\r\n\r\n self.output.sort(key=lambda x: int(x[0]))\r\n self.df_log['EventId'] = [str(logL[1]) for logL in self.output]\r\n self.df_log['EventTemplate'] = [eventID_template[logL[1]] for logL in self.output]\r\n # if self.keep_para:\r\n # self.df_log[\"ParameterList\"] = self.df_log.apply(self.get_parameter_list, axis=1)\r\n self.df_log.to_csv(os.path.join(self.para.savePath, self.logname + '_structured.csv'), index=False)\r\n\r\n \"\"\"\r\n For 1-M and M-1 mappings, you need to decide whether M side are constants or variables. This method is to decide which side to split\r\n\r\n cardOfS : The number of unique values in this set\r\n Lines_that_match_S: The number of lines that have these values\r\n one_m : If the mapping is 1-M, this value is True. Otherwise, False\r\n \"\"\"\r\n\r\n def Get_Rank_Posistion(self, cardOfS, Lines_that_match_S, one_m):\r\n try:\r\n distance = 1.0 * cardOfS / Lines_that_match_S\r\n except ZeroDivisionError as er1:\r\n print(er1)\r\n print(\"cardOfS: \" + str(cardOfS) + '\\t' + 'Lines_that_match_S: ' + str(Lines_that_match_S))\r\n\r\n if distance <= self.para.lowerBound:\r\n if one_m:\r\n split_rank = 2\r\n else:\r\n split_rank = 1\r\n elif distance >= self.para.upperBound:\r\n if one_m:\r\n split_rank = 1\r\n else:\r\n split_rank = 2\r\n else:\r\n if one_m:\r\n split_rank = 1\r\n else:\r\n split_rank = 2\r\n\r\n return split_rank\r\n\r\n def DetermineP1P2(self, partition):\r\n if partition.lenOfLogs > 2:\r\n count_1 = 0\r\n\r\n uniqueTokensCountLS = []\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS.append(set())\r\n\r\n for logL in partition.logLL:\r\n for columnIdx in range(partition.lenOfLogs):\r\n uniqueTokensCountLS[columnIdx].add(logL[columnIdx])\r\n\r\n # Count how many columns have only one unique term\r\n for columnIdx in range(partition.lenOfLogs):\r\n if len(uniqueTokensCountLS[columnIdx]) == 1:\r\n count_1 += 1\r\n\r\n # If the columns with unique term more than a threshold, we return (-1, -1) to skip step 3\r\n GC = 1.0 * count_1 / partition.lenOfLogs\r\n\r\n if GC < self.para.CT:\r\n return self.Get_Mapping_Position(partition, uniqueTokensCountLS)\r\n else:\r\n return (-1, -1)\r\n\r\n\r\n elif partition.lenOfLogs == 2:\r\n return (0, 1)\r\n else:\r\n return (-1, -1)\r\n\r\n def Get_Mapping_Position(self, partition, uniqueTokensCountLS):\r\n p1 = p2 = -1\r\n\r\n # Caculate #unqiueterms in each column, and record how many column with each #uniqueterms\r\n numOfUniqueTokensD = {}\r\n for columnIdx in range(partition.lenOfLogs):\r\n if 
len(uniqueTokensCountLS[columnIdx]) not in numOfUniqueTokensD:\r\n numOfUniqueTokensD[len(uniqueTokensCountLS[columnIdx])] = 0\r\n numOfUniqueTokensD[len(uniqueTokensCountLS[columnIdx])] += 1\r\n\r\n if partition.stepNo == 2:\r\n\r\n # Find the largest card and second largest card\r\n maxIdx = secondMaxIdx = -1\r\n maxCount = secondMaxCount = 0\r\n for key in numOfUniqueTokensD:\r\n if key == 1:\r\n continue\r\n if numOfUniqueTokensD[key] > maxCount:\r\n secondMaxIdx = maxIdx\r\n secondMaxCount = maxCount\r\n maxIdx = key\r\n maxCount = numOfUniqueTokensD[key]\r\n elif numOfUniqueTokensD[key] > secondMaxCount and numOfUniqueTokensD[key] != maxCount:\r\n secondMaxIdx = key\r\n secondMaxCount = numOfUniqueTokensD[key]\r\n\r\n # If the frequency of the freq_card>1 then\r\n if maxCount > 1:\r\n for columnIdx in range(partition.lenOfLogs):\r\n if len(uniqueTokensCountLS[columnIdx]) == maxIdx:\r\n if p1 == -1:\r\n p1 = columnIdx\r\n else:\r\n p2 = columnIdx\r\n break\r\n\r\n # for columnIdx in range(partition.lenOfLogs):\r\n # if p2 != -1:\r\n # break\r\n # if numOfUniqueTokensD[len(uniqueTokensCountLS[columnIdx])] == secondMaxCount:\r\n # p2 = columnIdx\r\n # break\r\n\r\n # If the frequency of the freq_card==1 then\r\n else:\r\n for columnIdx in range(partition.lenOfLogs):\r\n if len(uniqueTokensCountLS[columnIdx]) == maxIdx:\r\n p1 = columnIdx\r\n break\r\n\r\n for columnIdx in range(partition.lenOfLogs):\r\n if len(uniqueTokensCountLS[columnIdx]) == secondMaxIdx:\r\n p2 = columnIdx\r\n break\r\n\r\n if p1 == -1 or p2 == -1:\r\n return (-1, -1)\r\n else:\r\n return (p1, p2)\r\n\r\n # If it is from step 1\r\n else:\r\n minIdx = secondMinIdx = -1\r\n minCount = secondMinCount = sys.maxsize\r\n for key in numOfUniqueTokensD:\r\n if numOfUniqueTokensD[key] < minCount:\r\n secondMinIdx = minIdx\r\n secondMinCount = minCount\r\n minIdx = key\r\n minCount = numOfUniqueTokensD[key]\r\n elif numOfUniqueTokensD[key] < secondMinCount and numOfUniqueTokensD[key] != minCount:\r\n secondMinIdx = key\r\n secondMinCount = numOfUniqueTokensD[key]\r\n\r\n for columnIdx in range(len(uniqueTokensCountLS)):\r\n if numOfUniqueTokensD[len(uniqueTokensCountLS[columnIdx])] == minCount:\r\n if p1 == -1:\r\n p1 = columnIdx\r\n break\r\n\r\n for columnIdx in range(len(uniqueTokensCountLS)):\r\n if numOfUniqueTokensD[len(uniqueTokensCountLS[columnIdx])] == secondMinCount:\r\n p2 = columnIdx\r\n break\r\n\r\n return (p1, p2)\r\n\r\n def PrintPartitions(self):\r\n for idx in range(len(self.partitionsL)):\r\n print('Partition {}:(from step {}) Valid:{}'.format(idx, self.partitionsL[idx].stepNo,\r\n self.partitionsL[idx].valid))\r\n\r\n for log in self.partitionsL[idx].logLL:\r\n print(log)\r\n\r\n def PrintEventStats(self):\r\n for event in self.eventsL:\r\n if event.eventCount > 1:\r\n print(str(event.eventId) + '\\t' + str(event.eventCount))\r\n print(event.eventStr)\r\n\r\n def log_to_dataframe(self, log_file, regex, headers, logformat):\r\n \"\"\" Function to transform log file to dataframe \r\n \"\"\"\r\n log_messages = []\r\n linecount = 0\r\n with open(log_file, 'r') as fin:\r\n for line in fin.readlines():\r\n try:\r\n match = regex.search(line.strip())\r\n message = [match.group(header) for header in headers]\r\n log_messages.append(message)\r\n linecount += 1\r\n except Exception as e:\r\n pass\r\n logdf = pd.DataFrame(log_messages, columns=headers)\r\n logdf.insert(0, 'LineId', None)\r\n logdf['LineId'] = [i + 1 for i in range(linecount)]\r\n return logdf\r\n\r\n def generate_logformat_regex(self, logformat):\r\n 
\"\"\" Function to generate regular expression to split log messages\r\n \"\"\"\r\n headers = []\r\n splitters = re.split(r'(<[^<>]+>)', logformat)\r\n regex = ''\r\n for k in range(len(splitters)):\r\n if k % 2 == 0:\r\n splitter = re.sub(' +', '\\s+', splitters[k])\r\n regex += splitter\r\n else:\r\n header = splitters[k].strip('<').strip('>')\r\n regex += '(?P<%s>.*?)' % header\r\n headers.append(header)\r\n regex = re.compile('^' + regex + '$')\r\n return headers, regex\r\n\r\n def get_parameter_list(self, row):\r\n template_regex = re.sub(r\"\\s<.{1,5}>\\s\", \"<*>\", row[\"EventTemplate\"])\r\n if \"<*>\" not in template_regex: return []\r\n template_regex = re.sub(r'([^A-Za-z0-9])', r'\\\\\\1', template_regex)\r\n template_regex = re.sub(r'\\\\ +', r'[^A-Za-z0-9]+', template_regex)\r\n template_regex = \"^\" + template_regex.replace(\"\\<\\*\\>\", \"(.*?)\") + \"$\"\r\n parameter_list = re.findall(template_regex, row[\"Content\"])\r\n parameter_list = parameter_list[0] if parameter_list else ()\r\n parameter_list = list(parameter_list) if isinstance(parameter_list, tuple) else [parameter_list]\r\n parameter_list = [para.strip(string.punctuation).strip(' ') for para in parameter_list]\r\n return parameter_list\r\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
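A hypothetical usage sketch for the IPLoM LogParser defined in the entry above. The import path is inferred from the file_path logparser/IPLoM/IPLoM.py, and the log format string and file name are placeholders rather than values from the source; the constructor keywords shown match the defaults in the class definition, and the output file names follow WriteEventToFile.

from logparser.IPLoM.IPLoM import LogParser   # import path inferred from the file_path above

log_format = '<Date> <Time> <Level> <Content>'   # placeholder format for an example log
parser = LogParser(log_format, indir='../logs/', outdir='./result/',
                   CT=0.35, lowerBound=0.25, rex=[])
parser.parse('example.log')   # writes example.log_templates.csv and example.log_structured.csv to outdir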
vanya2v/Multi-modal-learning
[ "628acee4e275db16733b13e4b1ae766132030b28", "628acee4e275db16733b13e4b1ae766132030b28" ]
[ "dltk/core/io/reader.py", "dltk/core/metrics.py" ]
[ "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport tensorflow as tf\nimport numpy as np\nimport SimpleITK as sitk\nimport traceback\n\nclass AbstractReader(object):\n \"\"\"Abstract reader\n\n Abstract reader class for data I/O. Provides the queue handling and wraps the specific reader functions to adapt\n given data types.\n\n \"\"\"\n\n def __init__(self, dtypes, dshapes, name='reader'):\n \"\"\"AbstractReader\n\n Construcsts an abstract reader\n\n Parameters\n ----------\n dtypes : list or tuple\n list of dtypes for the tensors in the queue\n dshapes : list or tuple\n list of shapes for the tensors in the queue\n name : string\n name of the reader, used for the name scope\n \"\"\"\n self.name = name\n self.dtypes = dtypes\n self.dshapes = dshapes\n\n self.__call__.__func__.__doc__ = self._create_queue.__doc__\n# print ('disini wrapper')\n \n def _preprocess(self, data):\n \"\"\" placeholder for the preprocessing of reader subclasses \"\"\"\n return data\n\n def _augment(self, data):\n \"\"\" placeholder for the augmentation of reader subclasses \"\"\"\n return data\n\n def _read_sample(self, id_queue, **kwargs):\n \"\"\" placeholder for the reading of independent samples of reader subclasses \"\"\"\n raise NotImplementedError('Abstract reader - not implemented')\n\n @staticmethod\n def _map_dtype(dtype):\n \"\"\" helper function to map tf data types to np data types \"\"\"\n if dtype == tf.float32:\n return np.float32\n elif dtype == tf.int32:\n return np.int32\n elif dtype == tf.float64:\n return np.float64\n elif dtype == tf.int64:\n return np.int64\n else:\n raise Exception('Dtype not handled')\n\n def _create_queue(self, id_list, shuffle=True, batch_size=16, num_readers=1, min_queue_examples=64,\n capacity=128, **kwargs):\n \"\"\" Builds the data queue using the '_read_sample' function\n\n Parameters\n ----------\n id_list : list or tuple\n list of examples to read. This can be a list of files or a list of ids or something else the read function\n understands\n shuffle : bool\n flag to toggle shuffling of examples\n batch_size : int\n num_readers : int\n number of readers to spawn to fill the queue. this is used for multi-threading and should be tuned\n according to the specific problem at hand and hardware available\n min_queue_examples : int\n minimum number of examples currently in the queue. This can be tuned for more preloading of the data\n capacity : int\n maximum number of examples the queue will hold. 
a lower number needs less memory whereas a higher number\n enables better mixing of the examples\n kwargs :\n additional arguments to be passed to the reader function\n\n Returns\n -------\n list\n list of tensors representing a batch from the queue\n\n \"\"\"\n with tf.name_scope(self.name):\n # Create filename_queue\n id_tensor = tf.convert_to_tensor(id_list, dtype=tf.string)\n\n id_queue = tf.train.slice_input_producer([id_tensor], capacity=16, shuffle=shuffle)\n\n if num_readers < 1:\n raise ValueError('Please make num_readers at least 1')\n\n # Build a FIFO or a shuffled queue\n if shuffle:\n examples_queue = tf.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_queue_examples,\n dtypes=self.dtypes)\n else:\n examples_queue = tf.FIFOQueue(\n capacity=capacity,\n dtypes=self.dtypes)\n\n if num_readers > 1:\n # Create multiple readers to populate the queue of examples.\n enqueue_ops = []\n for _ in range(num_readers):\n ex = self._read_wrapper(id_queue, **kwargs)\n enqueue_ops.append(examples_queue.enqueue_many(ex))\n\n tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))\n\n ex_tensors = examples_queue.dequeue()\n\n ex = []\n\n for t, s in zip(ex_tensors, self.dshapes):\n t.set_shape(list(s))\n t = tf.expand_dims(t, 0)\n ex.append(t)\n else:\n # Use a single reader for population\n ex = self._read_wrapper(id_queue, **kwargs)\n\n # create a batch_size tensor with default shape, to keep the downstream graph flexible\n batch_size_tensor = tf.placeholder_with_default(batch_size, shape=[], name='batch_size_ph')\n\n # batch the read examples\n ex_batch = tf.train.batch(\n ex,\n batch_size=batch_size_tensor,\n enqueue_many=True,\n capacity=2 * num_readers * batch_size)\n\n return ex_batch\n\n def _read_wrapper(self, id_queue, **kwargs):\n \"\"\" Wrapper for the '_read_sample' function\n\n Wraps the 'read_sample function and handles tensor shapes and data types\n\n Parameters\n ----------\n id_queue : list\n list of tf.Tensors from the id_list queue. Provides an identifier for the examples to read.\n kwargs :\n additional arguments for the '_read_sample function'\n\n Returns\n -------\n list\n list of tf.Tensors read for this example\n\n \"\"\"\n def f(id_queue):\n \"\"\" Wrapper for the python function\n\n Handles the data types of the py_func\n\n Parameters\n ----------\n id_queue : list\n list of tf.Tensors from the id_list queue. 
Provides an identifier for the examples to read.\n\n Returns\n -------\n list\n list of things just read\n \"\"\"\n try:\n ex = self._read_sample(id_queue, ** kwargs)\n except Exception as e:\n print('got error `{} from `_read_sample`:'.format(e))\n print(traceback.format_exc())\n raise\n\n # eventually fix data types of read objects\n tensors = []\n for t, d in zip(ex, self.dtypes):\n if isinstance(t, np.ndarray):\n tensors.append(t.astype(self._map_dtype(d)))\n elif isinstance(t, (float, int)):\n if d is tf.float32 and isinstance(t, int):\n print('Warning: Losing accuracy by converting int to float')\n tensors.append(self._map_dtype(d)(t))\n elif isinstance(t, bool):\n tensors.append(t)\n else:\n raise Exception('Not sure how to interpret \"{}\"'.format(type(t)))\n return tensors\n\n \n ex = tf.py_func(f, [id_queue], self.dtypes)\n tensors = []\n # set shape of tensors for downstream inference of shapes\n for t, s in zip(ex, self.dshapes):\n t.set_shape([None] + list(s))\n tensors.append(t)\n return tensors\n\n def __call__(self, *args, **kwargs):\n return self._create_queue(*args, **kwargs)\n\n\nclass SimpleSITKReader(AbstractReader):\n \"\"\"SimpleSITKReader\n\n Simple reader class to read sitk files by file path\n\n \"\"\"\n def __init__(self, dtypes, dshapes, name='simplesitkreader'):\n super(SimpleSITKReader, self).__init__(dtypes, dshapes, name=name)\n\n def _read_sample(self, id_queue, **kwargs):\n path_list = id_queue[0]\n\n data = []\n\n for p, d in zip(list(path_list), self.dtypes):\n if isinstance(p, str):\n # load image etc\n sample = sitk.GetArrayFromImage(sitk.ReadImage(p))\n data.append(sample.astype(self._map_dtype(d)))\n elif isinstance(p, (float, int)):\n # load label\n if d is tf.float32 and isinstance(p, int):\n print('Warning: Losing accuracy by converting int to float')\n data.append(self._map_dtype(d)(p))\n else:\n raise Exception('Not sure how to interpret \"{}\"'.format(p))\n\n data = self._preprocess(data)\n data = self._augment(data)\n\n return data", "import numpy as np\n\n\ndef dice(pred, labels, num_classes):\n \"\"\"Calculates the dice score of labels and predictions\n\n Parameters\n ----------\n pred : np.ndarray\n predictions\n labels : np.ndarray\n labels\n num_classes : int\n number of classes to calculate avd for\n\n Returns\n -------\n np.ndarray\n dice per class\n\n \"\"\"\n\n dice_scores = np.zeros((num_classes))\n for i in range(num_classes):\n tmp_den = (np.sum(pred == i) + np.sum(labels == i))\n tmp_dice = 2. * np.sum((pred == i) * (labels == i)) / tmp_den if tmp_den > 0 else 1. 
\n dice_scores[i] = tmp_dice\n return dice_scores\n\n\ndef abs_vol_difference(pred, labels, num_classes):\n \"\"\"Calculates the average volume difference of labels and predictions per class\n\n Parameters\n ----------\n pred : np.ndarray\n predictions\n labels : np.ndarray\n labels\n num_classes : int\n number of classes to calculate avd for\n\n Returns\n -------\n np.ndarray\n avd per class\n\n \"\"\"\n\n avd = np.zeros((num_classes))\n eps = 1e-6\n for i in range(num_classes):\n avd[i] = np.abs(np.sum(pred == i) - np.sum(labels == i)) / (np.float(np.sum(labels == i)) + eps)\n \n return avd\n\n\ndef crossentropy(pred, labels, logits=True):\n \"\"\" Calculates the crossentropy loss between prediction and labels\n\n Parameters\n ----------\n pred : np.ndarray\n prediction of the system\n labels : np.ndarray\n labels\n logits : bool\n flag whether pred are logits or probabilities\n\n Returns\n -------\n float\n crossentropy error\n\n \"\"\"\n if logits:\n maxes = np.amax(pred, axis=-1, keepdims=True)\n softexp = np.exp(pred - maxes)\n softm = softexp / np.sum(softexp, axis=-1, keepdims=True)\n else:\n softm = pred\n \n if np.isnan(np.max(softm)):\n loss = np.mean(-1. * np.sum(labels * np.log(1e-7), axis=-1))\n else: \n loss = np.mean(-1. * np.sum(labels * np.log(softm + 1e-7), axis=-1))\n# print('softm', np.max(softm))\n# print('loss dalam', np.isnan(np.log(softm + 1e-7)))\n return loss.astype(np.float32)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.FIFOQueue", "tensorflow.placeholder_with_default", "tensorflow.expand_dims", "tensorflow.RandomShuffleQueue", "tensorflow.train.queue_runner.QueueRunner", "tensorflow.name_scope", "tensorflow.train.slice_input_producer", "tensorflow.train.batch", "tensorflow.py_func" ], [ "numpy.amax", "numpy.log", "numpy.max", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vladiant/ComputerVisionUtils
[ "d893726bff2869bf11e6f6dbfc7702ed875760f8" ]
[ "FeatureMatchingSamples/Python/Combined/ORB/orb_flann_knn_matcher.py" ]
[ "# https://github.com/methylDragon/opencv-python-reference/blob/master/02%20OpenCV%20Feature%20Detection%20and%20Description.md\n# Source: https://docs.opencv.org/3.4.4/dc/dc3/tutorial_py_matcher.html\n\nimport numpy as np\nimport cv2 as cv\n\nimg1 = cv.imread(\"box.png\") # queryImage\nimg2 = cv.imread(\"box_in_scene.png\") # trainImage\n\n# Initiate ORB detector\norb = cv.ORB_create()\n\n# find the keypoints and descriptors with ORB\nkp1, des1 = orb.detectAndCompute(img1, None)\nkp2, des2 = orb.detectAndCompute(img2, None)\n\nFLANN_INDEX_LSH = 6\nindex_params = dict(\n algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1,\n) # 2\n\n# Then set number of searches. Higher is better, but takes longer\nsearch_params = dict(checks=100)\n\n# Initialize matches\nflann = cv.FlannBasedMatcher(index_params, search_params)\n\n# Find matches\nmatches = flann.knnMatch(des1, des2, k=2)\n\n# Flags:\n# cv.DRAW_MATCHES_FLAGS_DEFAULT\n# cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG\n# cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS\n# cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS\nimg3 = cv.drawMatchesKnn(\n img1,\n kp1,\n img2,\n kp2,\n matches[:10],\n None,\n flags=cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS,\n)\n\n# Draw matches\ncv.namedWindow(\"ORB BF Matcher\", cv.WINDOW_NORMAL)\ncv.imshow(\"ORB BF Matcher\", img3)\n\n# Calculate homography\n# Consider point filtering\nobj = []\nscene = []\nfor match in matches:\n # Ratio test as per Lowe's SIFT paper\n if match[0].distance >= 0.7 * match[1].distance:\n continue\n obj.append(kp1[match[0].queryIdx].pt)\n scene.append(kp2[match[0].trainIdx].pt)\n\n# Calculate homography: Inliers and outliers\n# RANSAC, LMEDS, RHO\nH, _ = cv.findHomography(np.array(obj), np.array(scene), cv.RANSAC)\n\nif H is not None:\n # Frame of the object image\n obj_points = np.array(\n [\n [0, 0],\n [img1.shape[1], 0],\n [img1.shape[1], img1.shape[0]],\n [0, img1.shape[0]],\n ],\n dtype=np.float,\n )\n\n # Check the sanity of the transformation\n warped_points = cv.perspectiveTransform(np.array([obj_points]), H)\n\n warped_image = np.copy(img2)\n cv.drawContours(\n warped_image, np.array([warped_points]).astype(np.int32), 0, (0, 0, 255)\n )\n\n cv.namedWindow(\"Warped Object\", cv.WINDOW_NORMAL)\n cv.imshow(\"Warped Object\", warped_image)\nelse:\n print(\"Error calculating perspective transformation\")\n\ncv.waitKey(0)\n" ]
[ [ "numpy.copy", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fengzhongye/face-alignment
[ "6a7731168dbb1a15f9ecd5fe4c79c992f179a622" ]
[ "face_alignment/utils.py" ]
[ "from __future__ import print_function\nimport os\nimport sys\nimport time\nimport torch\nimport math\nimport numpy as np\nimport cv2\n\n\ndef _gaussian(\n size=3, sigma=0.25, amplitude=1, normalize=False, width=None,\n height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,\n mean_vert=0.5):\n # handle some defaults\n if width is None:\n width = size\n if height is None:\n height = size\n if sigma_horz is None:\n sigma_horz = sigma\n if sigma_vert is None:\n sigma_vert = sigma\n center_x = mean_horz * width + 0.5\n center_y = mean_vert * height + 0.5\n gauss = np.empty((height, width), dtype=np.float32)\n # generate kernel\n for i in range(height):\n for j in range(width):\n gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (\n sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))\n if normalize:\n gauss = gauss / np.sum(gauss)\n return gauss\n\n\ndef draw_gaussian(image, point, sigma):\n # Check if the gaussian is inside\n ul = [math.floor(point[0] - 3 * sigma), math.floor(point[1] - 3 * sigma)]\n br = [math.floor(point[0] + 3 * sigma), math.floor(point[1] + 3 * sigma)]\n if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1):\n return image\n size = 6 * sigma + 1\n g = _gaussian(size)\n g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))]\n g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))]\n img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]\n img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]\n assert (g_x[0] > 0 and g_y[1] > 0)\n image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]\n ] = image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]\n image[image > 1] = 1\n return image\n\n\ndef transform(point, center, scale, resolution, invert=False):\n \"\"\"Generate and affine transformation matrix.\n\n Given a set of points, a center, a scale and a targer resolution, the\n function generates and affine transformation matrix. If invert is ``True``\n it will produce the inverse transformation.\n\n Arguments:\n point {torch.tensor} -- the input 2D point\n center {torch.tensor or numpy.array} -- the center around which to perform the transformations\n scale {float} -- the scale of the face/object\n resolution {float} -- the output resolution\n\n Keyword Arguments:\n invert {bool} -- define wherever the function should produce the direct or the\n inverse transformation matrix (default: {False})\n \"\"\"\n _pt = torch.ones(3)\n _pt[0] = point[0]\n _pt[1] = point[1]\n\n h = 200.0 * scale\n t = torch.eye(3)\n t[0, 0] = resolution / h\n t[1, 1] = resolution / h\n t[0, 2] = resolution * (-center[0] / h + 0.5)\n t[1, 2] = resolution * (-center[1] / h + 0.5)\n\n if invert:\n t = torch.inverse(t)\n\n new_point = (torch.matmul(t, _pt))[0:2]\n\n return new_point.int()\n\n\ndef crop(image, center, scale, resolution=256.0):\n \"\"\"Center crops an image or set of heatmaps\n\n Arguments:\n image {numpy.array} -- an rgb image\n center {numpy.array} -- the center of the object, usually the same as of the bounding box\n scale {float} -- scale of the face\n\n Keyword Arguments:\n resolution {float} -- the size of the output cropped image (default: {256.0})\n\n Returns:\n [type] -- [description]\n \"\"\" # Crop around the center point\n \"\"\" Crops the image around the center. 
Input is expected to be an np.ndarray \"\"\"\n ul = transform([1, 1], center, scale, resolution, True)\n br = transform([resolution, resolution], center, scale, resolution, True)\n # pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)\n if image.ndim > 2:\n newDim = np.array([br[1] - ul[1], br[0] - ul[0],\n image.shape[2]], dtype=np.int32)\n newImg = np.zeros(newDim, dtype=np.uint8)\n else:\n newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int)\n newImg = np.zeros(newDim, dtype=np.uint8)\n ht = image.shape[0]\n wd = image.shape[1]\n newX = np.array(\n [max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)\n newY = np.array(\n [max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)\n oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)\n oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)\n newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]\n ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]\n newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),\n interpolation=cv2.INTER_LINEAR)\n return newImg\n\n\ndef get_preds_fromhm(hm, center=None, scale=None):\n \"\"\"Obtain (x,y) coordinates given a set of N heatmaps. If the center\n and the scale is provided the function will return the points also in\n the original coordinate frame.\n\n Arguments:\n hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]\n\n Keyword Arguments:\n center {torch.tensor} -- the center of the bounding box (default: {None})\n scale {float} -- face scale (default: {None})\n \"\"\"\n max, idx = torch.max(\n hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)\n idx += 1\n preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()\n preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)\n preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)\n\n for i in range(preds.size(0)):\n for j in range(preds.size(1)):\n hm_ = hm[i, j, :]\n pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1\n if pX > 0 and pX < 63 and pY > 0 and pY < 63:\n diff = torch.FloatTensor(\n [hm_[pY, pX + 1] - hm_[pY, pX - 1],\n hm_[pY + 1, pX] - hm_[pY - 1, pX]])\n preds[i, j].add_(diff.sign_().mul_(.25))\n\n preds.add_(-.5)\n\n preds_orig = torch.zeros(preds.size())\n if center is not None and scale is not None:\n for i in range(hm.size(0)):\n for j in range(hm.size(1)):\n preds_orig[i, j] = transform(\n preds[i, j], center, scale, hm.size(2), True)\n\n return preds, preds_orig\n\n\ndef create_target_heatmap(target_landmarks, centers, scales):\n heatmaps = np.zeros((target_landmarks.shape[0], 68, 64, 64), dtype=np.float32)\n for i in range(heatmaps.shape[0]):\n for p in range(68):\n landmark_cropped_coor = transform(target_landmarks[i, p] + 1, centers[i], scales[i], 64, invert=False)\n heatmaps[i, p] = draw_gaussian(heatmaps[i, p], landmark_cropped_coor + 1, 1)\n return torch.tensor(heatmaps)\n\n\ndef create_bounding_box(target_landmarks, expansion_factor=0.0):\n \"\"\"\n gets a batch of landmarks and calculates a bounding box that includes all the landmarks per set of landmarks in\n the batch\n :param target_landmarks: batch of landmarks of dim (n x 68 x 2). Where n is the batch size\n :param expansion_factor: expands the bounding box by this factor. 
For example, a `expansion_factor` of 0.2 leads\n to 20% increase in width and height of the boxes\n :return: a batch of bounding boxes of dim (n x 4) where the second dim is (x1,y1,x2,y2)\n \"\"\"\n # Calc bounding box\n x_y_min, _ = target_landmarks.reshape(-1, 68, 2).min(dim=1)\n x_y_max, _ = target_landmarks.reshape(-1, 68, 2).max(dim=1)\n # expanding the bounding box\n expansion_factor /= 2\n bb_expansion_x = (x_y_max[:, 0] - x_y_min[:, 0]) * expansion_factor\n bb_expansion_y = (x_y_max[:, 1] - x_y_min[:, 1]) * expansion_factor\n x_y_min[:, 0] -= bb_expansion_x\n x_y_max[:, 0] += bb_expansion_x\n x_y_min[:, 1] -= bb_expansion_y\n x_y_max[:, 1] += bb_expansion_y\n return torch.cat([x_y_min, x_y_max], dim=1)\n\n\ndef shuffle_lr(parts, pairs=None):\n \"\"\"Shuffle the points left-right according to the axis of symmetry\n of the object.\n\n Arguments:\n parts {torch.tensor} -- a 3D or 4D object containing the\n heatmaps.\n\n Keyword Arguments:\n pairs {list of integers} -- [order of the flipped points] (default: {None})\n \"\"\"\n if pairs is None:\n pairs = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,\n 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35,\n 34, 33, 32, 31, 45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41,\n 40, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57, 56, 55, 64, 63,\n 62, 61, 60, 67, 66, 65]\n if parts.ndimension() == 3:\n parts = parts[pairs, ...]\n else:\n parts = parts[:, pairs, ...]\n\n return parts\n\n\ndef flip(tensor, is_label=False):\n \"\"\"Flip an image or a set of heatmaps left-right\n\n Arguments:\n tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]\n\n Keyword Arguments:\n is_label {bool} -- [denote wherever the input is an image or a set of heatmaps ] (default: {False})\n \"\"\"\n if not torch.is_tensor(tensor):\n tensor = torch.from_numpy(tensor)\n\n if is_label:\n tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)\n else:\n tensor = tensor.flip(tensor.ndimension() - 1)\n\n return tensor\n\n# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py)\n\n\ndef appdata_dir(appname=None, roaming=False):\n \"\"\" appdata_dir(appname=None, roaming=False)\n\n Get the path to the application directory, where applications are allowed\n to write user specific files (e.g. configurations). 
For non-user specific\n data, consider using common_appdata_dir().\n If appname is given, a subdir is appended (and created if necessary).\n If roaming is True, will prefer a roaming directory (Windows Vista/7).\n \"\"\"\n\n # Define default user directory\n userDir = os.getenv('FACEALIGNMENT_USERDIR', None)\n if userDir is None:\n userDir = os.path.expanduser('~')\n if not os.path.isdir(userDir): # pragma: no cover\n userDir = '/var/tmp' # issue #54\n\n # Get system app data dir\n path = None\n if sys.platform.startswith('win'):\n path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')\n path = (path2 or path1) if roaming else (path1 or path2)\n elif sys.platform.startswith('darwin'):\n path = os.path.join(userDir, 'Library', 'Application Support')\n # On Linux and as fallback\n if not (path and os.path.isdir(path)):\n path = userDir\n\n # Maybe we should store things local to the executable (in case of a\n # portable distro or a frozen application that wants to be portable)\n prefix = sys.prefix\n if getattr(sys, 'frozen', None):\n prefix = os.path.abspath(os.path.dirname(sys.executable))\n for reldir in ('settings', '../settings'):\n localpath = os.path.abspath(os.path.join(prefix, reldir))\n if os.path.isdir(localpath): # pragma: no cover\n try:\n open(os.path.join(localpath, 'test.write'), 'wb').close()\n os.remove(os.path.join(localpath, 'test.write'))\n except IOError:\n pass # We cannot write in this directory\n else:\n path = localpath\n break\n\n # Get path specific for this app\n if appname:\n if path == userDir:\n appname = '.' + appname.lstrip('.') # Make it a hidden directory\n path = os.path.join(path, appname)\n if not os.path.isdir(path): # pragma: no cover\n os.mkdir(path)\n\n # Done\n return path\n" ]
[ [ "torch.ones", "torch.cat", "torch.eye", "torch.is_tensor", "torch.from_numpy", "torch.tensor", "torch.inverse", "torch.matmul", "torch.FloatTensor", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
teaVeloper/modin
[ "035f5027fc9f1236ba70ab8f8126f370f1f2280f", "035f5027fc9f1236ba70ab8f8126f370f1f2280f" ]
[ "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition_manager.py", "modin/core/dataframe/pandas/partitioning/partition_manager.py" ]
[ "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses class that implements ``GenericRayDataframePartitionManager`` using Ray.\"\"\"\n\nimport inspect\nimport numpy as np\nimport threading\n\nfrom modin.config import ProgressBar, NPartitions\nfrom modin.core.execution.ray.generic.partitioning.partition_manager import (\n GenericRayDataframePartitionManager,\n)\nfrom .virtual_partition import (\n PandasOnRayDataframeColumnPartition,\n PandasOnRayDataframeRowPartition,\n)\nfrom .partition import PandasOnRayDataframePartition\nfrom modin.core.execution.ray.generic.modin_aqp import call_progress_bar\nfrom modin.core.storage_formats.pandas.utils import compute_chunksize\nfrom modin.error_message import ErrorMessage\nimport pandas\n\nimport ray\n\n\ndef progress_bar_wrapper(f):\n \"\"\"\n Wrap computation function inside a progress bar.\n\n Spawns another thread which displays a progress bar showing\n estimated completion time.\n\n Parameters\n ----------\n f : callable\n The name of the function to be wrapped.\n\n Returns\n -------\n callable\n Decorated version of `f` which reports progress.\n \"\"\"\n from functools import wraps\n\n @wraps(f)\n def magic(*args, **kwargs):\n result_parts = f(*args, **kwargs)\n if ProgressBar.get():\n current_frame = inspect.currentframe()\n function_name = None\n while function_name != \"<module>\":\n (\n filename,\n line_number,\n function_name,\n lines,\n index,\n ) = inspect.getframeinfo(current_frame)\n current_frame = current_frame.f_back\n t = threading.Thread(\n target=call_progress_bar,\n args=(result_parts, line_number),\n )\n t.start()\n # We need to know whether or not we are in a jupyter notebook\n from IPython import get_ipython\n\n try:\n ipy_str = str(type(get_ipython()))\n if \"zmqshell\" not in ipy_str:\n t.join()\n except Exception:\n pass\n return result_parts\n\n return magic\n\n\nclass PandasOnRayDataframePartitionManager(GenericRayDataframePartitionManager):\n \"\"\"The class implements the interface in `PandasDataframePartitionManager`.\"\"\"\n\n # This object uses RayRemotePartition objects as the underlying store.\n _partition_class = PandasOnRayDataframePartition\n _column_partitions_class = PandasOnRayDataframeColumnPartition\n _row_partition_class = PandasOnRayDataframeRowPartition\n\n @classmethod\n def get_indices(cls, axis, partitions, index_func=None):\n \"\"\"\n Get the internal indices stored in the partitions.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to extract the labels over.\n partitions : np.ndarray\n NumPy array with ``PandasDataframePartition``-s.\n index_func : callable, default: None\n The function to be used to extract the indices.\n\n Returns\n -------\n pandas.Index\n A ``pandas.Index`` object.\n\n Notes\n -----\n These are the global indices of the object. 
This is mostly useful\n when you have deleted rows/columns internally, but do not know\n which ones were deleted.\n \"\"\"\n ErrorMessage.catch_bugs_and_request_email(not callable(index_func))\n func = cls.preprocess_func(index_func)\n if axis == 0:\n # We grab the first column of blocks and extract the indices\n new_idx = (\n [idx.apply(func).oid for idx in partitions.T[0]]\n if len(partitions.T)\n else []\n )\n else:\n new_idx = (\n [idx.apply(func).oid for idx in partitions[0]]\n if len(partitions)\n else []\n )\n new_idx = ray.get(new_idx)\n return new_idx[0].append(new_idx[1:]) if len(new_idx) else new_idx\n\n @classmethod\n def concat(cls, axis, left_parts, right_parts):\n \"\"\"\n Concatenate the blocks of partitions with another set of blocks.\n\n Parameters\n ----------\n axis : int\n The axis to concatenate to.\n left_parts : np.ndarray\n NumPy array of partitions to concatenate with.\n right_parts : np.ndarray or list\n NumPy array of partitions to be concatenated.\n\n Returns\n -------\n np.ndarray\n A new NumPy array with concatenated partitions.\n\n Notes\n -----\n Assumes that the `left_parts` and `right_parts` blocks are already the same\n shape on the dimension (opposite `axis`) as the one being concatenated. A\n ``ValueError`` will be thrown if this condition is not met.\n \"\"\"\n result = super(PandasOnRayDataframePartitionManager, cls).concat(\n axis, left_parts, right_parts\n )\n if axis == 0:\n return cls.rebalance_partitions(result)\n else:\n return result\n\n @classmethod\n def rebalance_partitions(cls, partitions):\n \"\"\"\n Rebalance a 2-d array of partitions.\n\n Rebalance the partitions by building a new array\n of partitions out of the original ones so that:\n - If all partitions have a length, each new partition has roughly the\n same number of rows.\n - Otherwise, each new partition spans roughly the same number of old\n partitions.\n\n Parameters\n ----------\n partitions : np.ndarray\n The 2-d array of partitions to rebalance.\n\n Returns\n -------\n np.ndarray\n A new NumPy array with rebalanced partitions.\n \"\"\"\n # We rebalance when the ratio of the number of existing partitions to\n # the ideal number of partitions is larger than this threshold. The\n # threshold is a heuristic that may need to be tuned for performance.\n max_excess_of_num_partitions = 1.5\n num_existing_partitions = partitions.shape[0]\n ideal_num_new_partitions = NPartitions.get()\n if (\n num_existing_partitions\n <= ideal_num_new_partitions * max_excess_of_num_partitions\n ):\n return partitions\n # If any partition has an unknown length, give each axis partition\n # roughly the same number of row partitions. 
We use `_length_cache` here\n # to avoid materializing any unmaterialized lengths.\n if any(\n partition._length_cache is None for row in partitions for partition in row\n ):\n # We need each partition to go into an axis partition, but the\n # number of axis partitions may not evenly divide the number of\n # partitions.\n chunk_size = compute_chunksize(\n num_existing_partitions, ideal_num_new_partitions, min_block_size=1\n )\n return np.array(\n [\n cls.column_partitions(\n partitions[i : i + chunk_size],\n full_axis=False,\n )\n for i in range(\n 0,\n num_existing_partitions,\n chunk_size,\n )\n ]\n )\n\n # If we know the number of rows in every partition, then we should try\n # instead to give each new partition roughly the same number of rows.\n new_partitions = []\n # `start` is the index of the first existing partition that we want to\n # put into the current new partition.\n start = 0\n total_rows = sum(part.length() for part in partitions[:, 0])\n ideal_partition_size = compute_chunksize(\n total_rows, ideal_num_new_partitions, min_block_size=1\n )\n for _ in range(ideal_num_new_partitions):\n # We might pick up old partitions too quickly and exhaust all of them.\n if start >= len(partitions):\n break\n # `stop` is the index of the last existing partition so far that we\n # want to put into the current new partition.\n stop = start\n partition_size = partitions[start][0].length()\n # Add existing partitions into the current new partition until the\n # number of rows in the new partition hits `ideal_partition_size`.\n while stop < len(partitions) and partition_size < ideal_partition_size:\n stop += 1\n if stop < len(partitions):\n partition_size += partitions[stop][0].length()\n # If the new partition is larger than we want, split the last\n # current partition that it contains into two partitions, where\n # the first partition has just enough rows to make the current\n # new partition have length `ideal_partition_size`, and the second\n # partition has the remainder.\n if partition_size > ideal_partition_size * max_excess_of_num_partitions:\n new_last_partition_size = ideal_partition_size - sum(\n row[0].length() for row in partitions[start:stop]\n )\n partitions = np.insert(\n partitions,\n stop + 1,\n [\n obj.mask(slice(new_last_partition_size, None), slice(None))\n for obj in partitions[stop]\n ],\n 0,\n )\n partitions[stop, :] = [\n obj.mask(slice(None, new_last_partition_size), slice(None))\n for obj in partitions[stop]\n ]\n partition_size = ideal_partition_size\n new_partitions.append(\n cls.column_partitions(\n (partitions[start : stop + 1]),\n full_axis=partition_size == total_rows,\n )\n )\n start = stop + 1\n return np.array(new_partitions)\n\n @classmethod\n def broadcast_apply(cls, axis, apply_func, left, right, other_name=\"r\"):\n \"\"\"\n Broadcast the `right` partitions to `left` and apply `apply_func` to selected indices.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : np.ndarray\n NumPy 2D array of left partitions.\n right : np.ndarray\n NumPy 2D array of right partitions.\n other_name : str, default: \"r\"\n Name of key-value argument for `apply_func` that\n is used to pass `right` to `apply_func`.\n\n Returns\n -------\n np.ndarray\n An array of partition objects.\n \"\"\"\n\n def map_func(df, *others):\n other = pandas.concat(others, axis=axis ^ 1)\n return apply_func(df, **{other_name: other})\n\n map_func = cls.preprocess_func(map_func)\n rt_axis_parts = 
cls.axis_partition(right, axis ^ 1)\n return np.array(\n [\n [\n part.apply(\n map_func,\n *(\n rt_axis_parts[col_idx].list_of_blocks\n if axis\n else rt_axis_parts[row_idx].list_of_blocks\n ),\n )\n for col_idx, part in enumerate(left[row_idx])\n ]\n for row_idx in range(len(left))\n ]\n )\n\n @classmethod\n @progress_bar_wrapper\n def map_partitions(cls, partitions, map_func):\n \"\"\"\n Apply `map_func` to every partition in `partitions`.\n\n Parameters\n ----------\n partitions : np.ndarray\n A NumPy 2D array of partitions to perform operation on.\n map_func : callable\n Function to apply.\n\n Returns\n -------\n np.ndarray\n A NumPy array of partitions.\n \"\"\"\n return super(PandasOnRayDataframePartitionManager, cls).map_partitions(\n partitions, map_func\n )\n\n @classmethod\n @progress_bar_wrapper\n def lazy_map_partitions(cls, partitions, map_func):\n \"\"\"\n Apply `map_func` to every partition in `partitions` *lazily*.\n\n Parameters\n ----------\n partitions : np.ndarray\n A NumPy 2D array of partitions to perform operation on.\n map_func : callable\n Function to apply.\n\n Returns\n -------\n np.ndarray\n A NumPy array of partitions.\n \"\"\"\n return super(PandasOnRayDataframePartitionManager, cls).lazy_map_partitions(\n partitions, map_func\n )\n\n @classmethod\n @progress_bar_wrapper\n def map_axis_partitions(\n cls,\n axis,\n partitions,\n map_func,\n keep_partitioning=False,\n lengths=None,\n enumerate_partitions=False,\n **kwargs,\n ):\n \"\"\"\n Apply `map_func` to every partition in `partitions` along given `axis`.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to perform the map across (0 - index, 1 - columns).\n partitions : np.ndarray\n A NumPy 2D array of partitions to perform operation on.\n map_func : callable\n Function to apply.\n keep_partitioning : bool, default: False\n Whether to keep partitioning for Modin Frame.\n Setting it to True prevents data shuffling between partitions.\n lengths : list of ints, default: None\n List of lengths to shuffle the object.\n enumerate_partitions : bool, default: False\n Whether or not to pass partition index into `map_func`.\n Note that `map_func` must be able to accept `partition_idx` kwarg.\n **kwargs : dict\n Additional options that could be used by different engines.\n\n Returns\n -------\n np.ndarray\n A NumPy array of new partitions for Modin Frame.\n\n Notes\n -----\n This method should be used in the case when `map_func` relies on\n some global information about the axis.\n \"\"\"\n return super(PandasOnRayDataframePartitionManager, cls).map_axis_partitions(\n axis,\n partitions,\n map_func,\n keep_partitioning,\n lengths,\n enumerate_partitions,\n **kwargs,\n )\n\n @classmethod\n @progress_bar_wrapper\n def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):\n \"\"\"\n Apply a `func` to a list of remote `partitions`.\n\n Parameters\n ----------\n func : callable\n The func to apply.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n **kwargs : dict\n Keyword arguments.\n\n Returns\n -------\n list\n A list of ``RayFramePartition`` objects.\n\n Notes\n -----\n This preprocesses the `func` first before applying it to the partitions.\n \"\"\"\n return super(\n PandasOnRayDataframePartitionManager, cls\n )._apply_func_to_list_of_partitions(func, partitions, **kwargs)\n\n @classmethod\n @progress_bar_wrapper\n def apply_func_to_select_indices(\n cls, axis, partitions, func, indices, keep_remaining=False\n ):\n \"\"\"\n Apply a `func` to select `indices` of 
`partitions`.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply the `func` over.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply to these indices of partitions.\n indices : dict\n The indices to apply the function to.\n keep_remaining : bool, default: False\n Whether or not to keep the other partitions. Some operations\n may want to drop the remaining partitions and keep\n only the results.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n Your internal function must take a kwarg `internal_indices` for\n this to work correctly. This prevents information leakage of the\n internal index to the external representation.\n \"\"\"\n return super(\n PandasOnRayDataframePartitionManager, cls\n ).apply_func_to_select_indices(\n axis, partitions, func, indices, keep_remaining=keep_remaining\n )\n\n @classmethod\n @progress_bar_wrapper\n def apply_func_to_select_indices_along_full_axis(\n cls, axis, partitions, func, indices, keep_remaining=False\n ):\n \"\"\"\n Apply a `func` to a select subset of full columns/rows.\n\n Parameters\n ----------\n axis : {0, 1}\n The axis to apply the `func` over.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply.\n indices : list-like\n The global indices to apply the func to.\n keep_remaining : bool, default: False\n Whether or not to keep the other partitions.\n Some operations may want to drop the remaining partitions and\n keep only the results.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n This should be used when you need to apply a function that relies\n on some global information for the entire column/row, but only need\n to apply a function to a subset.\n For your func to operate directly on the indices provided,\n it must use `internal_indices` as a keyword argument.\n \"\"\"\n return super(\n PandasOnRayDataframePartitionManager, cls\n ).apply_func_to_select_indices_along_full_axis(\n axis, partitions, func, indices, keep_remaining\n )\n\n @classmethod\n @progress_bar_wrapper\n def apply_func_to_indices_both_axis(\n cls,\n partitions,\n func,\n row_partitions_list,\n col_partitions_list,\n item_to_distribute=None,\n row_lengths=None,\n col_widths=None,\n ):\n \"\"\"\n Apply a function along both axes.\n\n Parameters\n ----------\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply.\n row_partitions_list : list\n List of row partitions.\n col_partitions_list : list\n List of column partitions.\n item_to_distribute : item, optional\n The item to split up so it can be applied over both axes.\n row_lengths : list of ints, optional\n Lengths of partitions for every row. If not specified this information\n is extracted from partitions itself.\n col_widths : list of ints, optional\n Widths of partitions for every column. 
If not specified this information\n is extracted from partitions itself.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n For your func to operate directly on the indices provided,\n it must use ``row_internal_indices`` and ``col_internal_indices`` as keyword\n arguments.\n \"\"\"\n return super(\n PandasOnRayDataframePartitionManager, cls\n ).apply_func_to_indices_both_axis(\n partitions,\n func,\n row_partitions_list,\n col_partitions_list,\n item_to_distribute,\n row_lengths,\n col_widths,\n )\n\n @classmethod\n @progress_bar_wrapper\n def binary_operation(cls, axis, left, func, right):\n \"\"\"\n Apply a function that requires partitions of two ``PandasOnRayDataframe`` objects.\n\n Parameters\n ----------\n axis : {0, 1}\n The axis to apply the function over (0 - rows, 1 - columns).\n left : np.ndarray\n The partitions of left ``PandasOnRayDataframe``.\n func : callable\n The function to apply.\n right : np.ndarray\n The partitions of right ``PandasOnRayDataframe``.\n\n Returns\n -------\n np.ndarray\n A NumPy array with new partitions.\n \"\"\"\n return super(PandasOnRayDataframePartitionManager, cls).binary_operation(\n axis, left, func, right\n )\n", "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nModule holding base PartitionManager class - the thing that tracks partitions across the distribution.\n\nThe manager also allows manipulating the data - running functions at each partition, shuffle over the distribution, etc.\n\"\"\"\n\nfrom abc import ABC\nfrom functools import wraps\nimport numpy as np\nimport pandas\nimport warnings\n\nfrom modin.error_message import ErrorMessage\nfrom modin.core.storage_formats.pandas.utils import compute_chunksize\nfrom modin.core.dataframe.pandas.utils import concatenate\nfrom modin.config import NPartitions, ProgressBar, BenchmarkMode\n\nimport os\n\n\ndef wait_computations_if_benchmark_mode(func):\n \"\"\"\n Make sure a `func` finished its computations in benchmark mode.\n\n Parameters\n ----------\n func : callable\n A function that should be performed in syncronous mode.\n\n Returns\n -------\n callable\n Wrapped function that executes eagerly (if benchmark mode) or original `func`.\n\n Notes\n -----\n `func` should return NumPy array with partitions.\n \"\"\"\n if BenchmarkMode.get():\n\n @wraps(func)\n def wait(*args, **kwargs):\n \"\"\"Wait for computation results.\"\"\"\n result = func(*args, **kwargs)\n if isinstance(result, tuple):\n partitions = result[0]\n else:\n partitions = result\n # need to go through all the values of the map iterator\n # since `wait` does not return anything, we need to explicitly add\n # the return `True` value from the lambda\n all(map(lambda partition: partition.wait() or True, partitions.flatten()))\n return result\n\n return wait\n return func\n\n\nclass PandasDataframePartitionManager(ABC):\n \"\"\"\n Base class for managing the dataframe data layout and operators across the distribution of partitions.\n\n Partition class is the class to use for storing each partition.\n Each partition must extend the `PandasDataframePartition` class.\n \"\"\"\n\n _partition_class = None\n # Column partitions class is the class to use to create the column partitions.\n _column_partitions_class = None\n # Row partitions class is the class to use to create the row partitions.\n _row_partition_class = None\n\n @classmethod\n def preprocess_func(cls, map_func):\n \"\"\"\n Preprocess a function to be applied to `PandasDataframePartition` objects.\n\n Parameters\n ----------\n map_func : callable\n The function to be preprocessed.\n\n Returns\n -------\n callable\n The preprocessed version of the `map_func` provided.\n\n Notes\n -----\n Preprocessing does not require any specific format, only that the\n `PandasDataframePartition.apply` method will recognize it (for the subclass\n being used).\n\n If your `PandasDataframePartition` objects assume that a function provided\n is serialized or wrapped or in some other format, this is the place\n to add that logic. 
It is possible that this can also just return\n `map_func` if the `apply` method of the `PandasDataframePartition` object\n you are using does not require any modification to a given function.\n \"\"\"\n return cls._partition_class.preprocess_func(map_func)\n\n # END Abstract Methods\n\n @classmethod\n def column_partitions(cls, partitions, full_axis=True):\n \"\"\"\n Get the list of `BaseDataframeAxisPartition` objects representing column-wise paritions.\n\n Parameters\n ----------\n partitions : list-like\n List of (smaller) partitions to be combined to column-wise partitions.\n full_axis : bool, default: True\n Whether or not this partition contains the entire column axis.\n\n Returns\n -------\n list\n A list of `BaseDataframeAxisPartition` objects.\n\n Notes\n -----\n Each value in this list will be an `BaseDataframeAxisPartition` object.\n `BaseDataframeAxisPartition` is located in `axis_partition.py`.\n \"\"\"\n if not isinstance(partitions, list):\n partitions = [partitions]\n return [\n cls._column_partitions_class(col, full_axis=full_axis)\n for frame in partitions\n for col in frame.T\n ]\n\n @classmethod\n def row_partitions(cls, partitions):\n \"\"\"\n List of `BaseDataframeAxisPartition` objects representing row-wise partitions.\n\n Parameters\n ----------\n partitions : list-like\n List of (smaller) partitions to be combined to row-wise partitions.\n\n Returns\n -------\n list\n A list of `BaseDataframeAxisPartition` objects.\n\n Notes\n -----\n Each value in this list will an `BaseDataframeAxisPartition` object.\n `BaseDataframeAxisPartition` is located in `axis_partition.py`.\n \"\"\"\n if not isinstance(partitions, list):\n partitions = [partitions]\n return [cls._row_partition_class(row) for frame in partitions for row in frame]\n\n @classmethod\n def axis_partition(cls, partitions, axis, full_axis: bool = True):\n \"\"\"\n Logically partition along given axis (columns or rows).\n\n Parameters\n ----------\n partitions : list-like\n List of partitions to be combined.\n axis : {0, 1}\n 0 for column partitions, 1 for row partitions.\n full_axis : bool, default: True\n Whether or not this partition contains the entire column axis.\n\n Returns\n -------\n list\n A list of `BaseDataframeAxisPartition` objects.\n \"\"\"\n make_column_partitions = axis == 0\n if not full_axis and not make_column_partitions:\n raise NotImplementedError(\n (\n \"Row partitions must contain the entire axis. 
We don't \"\n + \"support virtual partitioning for row partitions yet.\"\n )\n )\n return (\n cls.column_partitions(partitions)\n if make_column_partitions\n else cls.row_partitions(partitions)\n )\n\n @classmethod\n def groupby_reduce(\n cls, axis, partitions, by, map_func, reduce_func, apply_indices=None\n ):\n \"\"\"\n Groupby data using the `map_func` provided along the `axis` over the `partitions` then reduce using `reduce_func`.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to groupby over.\n partitions : NumPy 2D array\n Partitions of the ModinFrame to groupby.\n by : NumPy 2D array\n Partitions of 'by' to broadcast.\n map_func : callable\n Map function.\n reduce_func : callable,\n Reduce function.\n apply_indices : list of ints, default: None\n Indices of `axis ^ 1` to apply function over.\n\n Returns\n -------\n NumPy array\n Partitions with applied groupby.\n \"\"\"\n if apply_indices is not None:\n partitions = (\n partitions[apply_indices] if axis else partitions[:, apply_indices]\n )\n\n if by is not None:\n mapped_partitions = cls.broadcast_apply(\n axis, map_func, left=partitions, right=by, other_name=\"other\"\n )\n else:\n mapped_partitions = cls.map_partitions(partitions, map_func)\n return cls.map_axis_partitions(\n axis, mapped_partitions, reduce_func, enumerate_partitions=True\n )\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def broadcast_apply_select_indices(\n cls,\n axis,\n apply_func,\n left,\n right,\n left_indices,\n right_indices,\n keep_remaining=False,\n ):\n \"\"\"\n Broadcast the `right` partitions to `left` and apply `apply_func` to selected indices.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : NumPy 2D array\n Left partitions.\n right : NumPy 2D array\n Right partitions.\n left_indices : list-like\n Indices to apply function to.\n right_indices : dictionary of indices of right partitions\n Indices that you want to bring at specified left partition, for example\n dict {key: {key1: [0, 1], key2: [5]}} means that in left[key] you want to\n broadcast [right[key1], right[key2]] partitions and internal indices\n for `right` must be [[0, 1], [5]].\n keep_remaining : bool, default: False\n Whether or not to keep the other partitions.\n Some operations may want to drop the remaining partitions and\n keep only the results.\n\n Returns\n -------\n NumPy array\n An array of partition objects.\n\n Notes\n -----\n Your internal function must take these kwargs:\n [`internal_indices`, `other`, `internal_other_indices`] to work correctly!\n \"\"\"\n if not axis:\n partitions_for_apply = left.T\n right = right.T\n else:\n partitions_for_apply = left\n\n [obj.drain_call_queue() for row in right for obj in row]\n\n def get_partitions(index):\n \"\"\"Grab required partitions and indices from `right` and `right_indices`.\"\"\"\n must_grab = right_indices[index]\n partitions_list = np.array([right[i] for i in must_grab.keys()])\n indices_list = list(must_grab.values())\n return {\"other\": partitions_list, \"internal_other_indices\": indices_list}\n\n new_partitions = np.array(\n [\n partitions_for_apply[i]\n if i not in left_indices\n else cls._apply_func_to_list_of_partitions_broadcast(\n apply_func,\n partitions_for_apply[i],\n internal_indices=left_indices[i],\n **get_partitions(i),\n )\n for i in range(len(partitions_for_apply))\n if i in left_indices or keep_remaining\n ]\n )\n if not axis:\n new_partitions = new_partitions.T\n return new_partitions\n\n @classmethod\n 
@wait_computations_if_benchmark_mode\n def broadcast_apply(cls, axis, apply_func, left, right, other_name=\"r\"):\n \"\"\"\n Broadcast the `right` partitions to `left` and apply `apply_func` function.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : NumPy 2D array\n Left partitions.\n right : NumPy 2D array\n Right partitions.\n other_name : str, default: \"r\"\n Name of key-value argument for `apply_func` that\n is used to pass `right` to `apply_func`.\n\n Returns\n -------\n NumPy array\n An of partition objects.\n\n Notes\n -----\n This will often be overridden by implementations. It materializes the\n entire partitions of the right and applies them to the left through `apply`.\n \"\"\"\n [obj.drain_call_queue() for row in right for obj in row]\n new_right = np.empty(shape=right.shape[axis], dtype=object)\n\n if axis:\n right = right.T\n\n for i in range(len(right)):\n new_right[i] = pandas.concat(\n [right[i][j].get() for j in range(len(right[i]))], axis=axis ^ 1\n )\n right = new_right.T if axis else new_right\n\n new_partitions = np.array(\n [\n [\n part.apply(\n apply_func,\n **{other_name: right[col_idx] if axis else right[row_idx]},\n )\n for col_idx, part in enumerate(left[row_idx])\n ]\n for row_idx in range(len(left))\n ]\n )\n\n return new_partitions\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def broadcast_axis_partitions(\n cls,\n axis,\n apply_func,\n left,\n right,\n keep_partitioning=False,\n apply_indices=None,\n enumerate_partitions=False,\n lengths=None,\n **kwargs,\n ):\n \"\"\"\n Broadcast the `right` partitions to `left` and apply `apply_func` along full `axis`.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : NumPy 2D array\n Left partitions.\n right : NumPy 2D array\n Right partitions.\n keep_partitioning : boolean, default: False\n The flag to keep partition boundaries for Modin Frame.\n Setting it to True disables shuffling data from one partition to another.\n apply_indices : list of ints, default: None\n Indices of `axis ^ 1` to apply function over.\n enumerate_partitions : bool, default: False\n Whether or not to pass partition index into `apply_func`.\n Note that `apply_func` must be able to accept `partition_idx` kwarg.\n lengths : list of ints, default: None\n The list of lengths to shuffle the object.\n **kwargs : dict\n Additional options that could be used by different engines.\n\n Returns\n -------\n NumPy array\n An array of partition objects.\n \"\"\"\n # Since we are already splitting the DataFrame back up after an\n # operation, we will just use this time to compute the number of\n # partitions as best we can right now.\n if keep_partitioning:\n num_splits = len(left) if axis == 0 else len(left.T)\n elif lengths:\n num_splits = len(lengths)\n else:\n num_splits = NPartitions.get()\n preprocessed_map_func = cls.preprocess_func(apply_func)\n left_partitions = cls.axis_partition(left, axis)\n right_partitions = None if right is None else cls.axis_partition(right, axis)\n # For mapping across the entire axis, we don't maintain partitioning because we\n # may want to line to partitioning up with another BlockPartitions object. 
Since\n # we don't need to maintain the partitioning, this gives us the opportunity to\n # load-balance the data as well.\n kw = {\n \"num_splits\": num_splits,\n \"other_axis_partition\": right_partitions,\n }\n if lengths:\n kw[\"_lengths\"] = lengths\n kw[\"manual_partition\"] = True\n\n if apply_indices is None:\n apply_indices = np.arange(len(left_partitions))\n\n result_blocks = np.array(\n [\n left_partitions[i].apply(\n preprocessed_map_func,\n **kw,\n **({\"partition_idx\": idx} if enumerate_partitions else {}),\n **kwargs,\n )\n for idx, i in enumerate(apply_indices)\n ]\n )\n # If we are mapping over columns, they are returned to use the same as\n # rows, so we need to transpose the returned 2D NumPy array to return\n # the structure to the correct order.\n return result_blocks.T if not axis else result_blocks\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def map_partitions(cls, partitions, map_func):\n \"\"\"\n Apply `map_func` to every partition in `partitions`.\n\n Parameters\n ----------\n partitions : NumPy 2D array\n Partitions housing the data of Modin Frame.\n map_func : callable\n Function to apply.\n\n Returns\n -------\n NumPy array\n An array of partitions\n \"\"\"\n preprocessed_map_func = cls.preprocess_func(map_func)\n return np.array(\n [\n [part.apply(preprocessed_map_func) for part in row_of_parts]\n for row_of_parts in partitions\n ]\n )\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def lazy_map_partitions(cls, partitions, map_func):\n \"\"\"\n Apply `map_func` to every partition in `partitions` *lazily*.\n\n Parameters\n ----------\n partitions : NumPy 2D array\n Partitions of Modin Frame.\n map_func : callable\n Function to apply.\n\n Returns\n -------\n NumPy array\n An array of partitions\n \"\"\"\n preprocessed_map_func = cls.preprocess_func(map_func)\n return np.array(\n [\n [part.add_to_apply_calls(preprocessed_map_func) for part in row]\n for row in partitions\n ]\n )\n\n @classmethod\n def map_axis_partitions(\n cls,\n axis,\n partitions,\n map_func,\n keep_partitioning=False,\n lengths=None,\n enumerate_partitions=False,\n **kwargs,\n ):\n \"\"\"\n Apply `map_func` to every partition in `partitions` along given `axis`.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to perform the map across (0 - index, 1 - columns).\n partitions : NumPy 2D array\n Partitions of Modin Frame.\n map_func : callable\n Function to apply.\n keep_partitioning : bool, default: False\n Whether to keep partitioning for Modin Frame.\n Setting it to True stops data shuffling between partitions.\n lengths : list of ints, default: None\n List of lengths to shuffle the object.\n enumerate_partitions : bool, default: False\n Whether or not to pass partition index into `map_func`.\n Note that `map_func` must be able to accept `partition_idx` kwarg.\n **kwargs : dict\n Additional options that could be used by different engines.\n\n Returns\n -------\n NumPy array\n An array of new partitions for Modin Frame.\n\n Notes\n -----\n This method should be used in the case when `map_func` relies on\n some global information about the axis.\n \"\"\"\n return cls.broadcast_axis_partitions(\n axis=axis,\n left=partitions,\n apply_func=map_func,\n keep_partitioning=keep_partitioning,\n right=None,\n lengths=lengths,\n enumerate_partitions=enumerate_partitions,\n **kwargs,\n )\n\n @classmethod\n def concat(cls, axis, left_parts, right_parts):\n \"\"\"\n Concatenate the blocks of partitions with another set of blocks.\n\n Parameters\n ----------\n axis : int\n The axis 
to concatenate to.\n left_parts : np.ndarray\n NumPy array of partitions to concatenate with.\n right_parts : np.ndarray or list\n NumPy array of partitions to be concatenated.\n\n Returns\n -------\n np.ndarray\n A new NumPy array with concatenated partitions.\n\n Notes\n -----\n Assumes that the blocks are already the same shape on the\n dimension being concatenated. A ValueError will be thrown if this\n condition is not met.\n \"\"\"\n # TODO: Possible change is `isinstance(right_parts, list)`\n if type(right_parts) is list:\n # `np.array` with partitions of empty ModinFrame has a shape (0,)\n # but `np.concatenate` can concatenate arrays only if its shapes at\n # specified axis are equals, so filtering empty frames to avoid concat error\n right_parts = [o for o in right_parts if o.size != 0]\n to_concat = (\n [left_parts] + right_parts if left_parts.size != 0 else right_parts\n )\n return (\n np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts\n )\n else:\n return np.append(left_parts, right_parts, axis=axis)\n\n @classmethod\n def to_pandas(cls, partitions):\n \"\"\"\n Convert NumPy array of PandasDataframePartition to pandas DataFrame.\n\n Parameters\n ----------\n partitions : np.ndarray\n NumPy array of PandasDataframePartition.\n\n Returns\n -------\n pandas.DataFrame\n A pandas DataFrame\n \"\"\"\n retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]\n if all(\n isinstance(part, pandas.Series) for row in retrieved_objects for part in row\n ):\n axis = 0\n elif all(\n isinstance(part, pandas.DataFrame)\n for row in retrieved_objects\n for part in row\n ):\n axis = 1\n else:\n ErrorMessage.catch_bugs_and_request_email(True)\n df_rows = [\n pandas.concat([part for part in row], axis=axis)\n for row in retrieved_objects\n if not all(part.empty for part in row)\n ]\n if len(df_rows) == 0:\n return pandas.DataFrame()\n else:\n return concatenate(df_rows)\n\n @classmethod\n def to_numpy(cls, partitions, **kwargs):\n \"\"\"\n Convert NumPy array of PandasDataframePartition to NumPy array of data stored within `partitions`.\n\n Parameters\n ----------\n partitions : np.ndarray\n NumPy array of PandasDataframePartition.\n **kwargs : dict\n Keyword arguments for PandasDataframePartition.to_numpy function.\n\n Returns\n -------\n np.ndarray\n A NumPy array.\n \"\"\"\n return np.block(\n [[block.to_numpy(**kwargs) for block in row] for row in partitions]\n )\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def from_pandas(cls, df, return_dims=False):\n \"\"\"\n Return the partitions from pandas.DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A pandas.DataFrame.\n return_dims : bool, default: False\n If it's True, return as (np.ndarray, row_lengths, col_widths),\n else np.ndarray.\n\n Returns\n -------\n np.ndarray or (np.ndarray, row_lengths, col_widths)\n A NumPy array with partitions (with dimensions or not).\n \"\"\"\n\n def update_bar(pbar, f):\n if ProgressBar.get():\n pbar.update(1)\n return f\n\n num_splits = NPartitions.get()\n put_func = cls._partition_class.put\n row_chunksize = compute_chunksize(df.shape[0], num_splits)\n col_chunksize = compute_chunksize(df.shape[1], num_splits)\n\n bar_format = (\n \"{l_bar}{bar}{r_bar}\"\n if os.environ.get(\"DEBUG_PROGRESS_BAR\", \"False\") == \"True\"\n else \"{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}\"\n )\n if ProgressBar.get():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n from 
tqdm.autonotebook import tqdm as tqdm_notebook\n except ImportError:\n raise ImportError(\"Please pip install tqdm to use the progress bar\")\n\n rows = max(1, round(len(df) / row_chunksize))\n cols = max(1, round(len(df.columns) / col_chunksize))\n update_count = rows * cols\n pbar = tqdm_notebook(\n total=round(update_count),\n desc=\"Distributing Dataframe\",\n bar_format=bar_format,\n )\n else:\n pbar = None\n parts = [\n [\n update_bar(\n pbar,\n put_func(\n df.iloc[i : i + row_chunksize, j : j + col_chunksize].copy()\n ),\n )\n for j in range(0, len(df.columns), col_chunksize)\n ]\n for i in range(0, len(df), row_chunksize)\n ]\n if ProgressBar.get():\n pbar.close()\n if not return_dims:\n return np.array(parts)\n else:\n row_lengths = [\n row_chunksize\n if i + row_chunksize < len(df)\n else len(df) % row_chunksize or row_chunksize\n for i in range(0, len(df), row_chunksize)\n ]\n col_widths = [\n col_chunksize\n if i + col_chunksize < len(df.columns)\n else len(df.columns) % col_chunksize or col_chunksize\n for i in range(0, len(df.columns), col_chunksize)\n ]\n return np.array(parts), row_lengths, col_widths\n\n @classmethod\n def from_arrow(cls, at, return_dims=False):\n \"\"\"\n Return the partitions from Apache Arrow (PyArrow).\n\n Parameters\n ----------\n at : pyarrow.table\n Arrow Table.\n return_dims : bool, default: False\n If it's True, return as (np.ndarray, row_lengths, col_widths),\n else np.ndarray.\n\n Returns\n -------\n np.ndarray or (np.ndarray, row_lengths, col_widths)\n A NumPy array with partitions (with dimensions or not).\n \"\"\"\n return cls.from_pandas(at.to_pandas(), return_dims=return_dims)\n\n @classmethod\n def get_indices(cls, axis, partitions, index_func=None):\n \"\"\"\n Get the internal indices stored in the partitions.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to extract the labels over.\n partitions : np.ndarray\n NumPy array with PandasDataframePartition's.\n index_func : callable, default: None\n The function to be used to extract the indices.\n\n Returns\n -------\n pandas.Index\n A pandas Index object.\n\n Notes\n -----\n These are the global indices of the object. 
This is mostly useful\n when you have deleted rows/columns internally, but do not know\n which ones were deleted.\n \"\"\"\n ErrorMessage.catch_bugs_and_request_email(not callable(index_func))\n func = cls.preprocess_func(index_func)\n if axis == 0:\n new_idx = (\n [idx.apply(func).get() for idx in partitions.T[0]]\n if len(partitions.T)\n else []\n )\n else:\n new_idx = (\n [idx.apply(func).get() for idx in partitions[0]]\n if len(partitions)\n else []\n )\n # TODO FIX INFORMATION LEAK!!!!1!!1!!\n return new_idx[0].append(new_idx[1:]) if len(new_idx) else new_idx\n\n @classmethod\n def _apply_func_to_list_of_partitions_broadcast(\n cls, func, partitions, other, **kwargs\n ):\n \"\"\"\n Apply a function to a list of remote partitions.\n\n `other` partitions will be broadcasted to `partitions`\n and `func` will be applied.\n\n Parameters\n ----------\n func : callable\n The func to apply.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n other : np.ndarray\n The partitions to be broadcasted to `partitions`.\n **kwargs : dict\n Keyword arguments for PandasDataframePartition.apply function.\n\n Returns\n -------\n list\n A list of PandasDataframePartition objects.\n \"\"\"\n preprocessed_func = cls.preprocess_func(func)\n return [\n obj.apply(preprocessed_func, other=[o.get() for o in broadcasted], **kwargs)\n for obj, broadcasted in zip(partitions, other.T)\n ]\n\n @classmethod\n def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):\n \"\"\"\n Apply a function to a list of remote partitions.\n\n Parameters\n ----------\n func : callable\n The func to apply.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n **kwargs : dict\n Keyword arguments for PandasDataframePartition.apply function.\n\n Returns\n -------\n list\n A list of PandasDataframePartition objects.\n\n Notes\n -----\n This preprocesses the `func` first before applying it to the partitions.\n \"\"\"\n preprocessed_func = cls.preprocess_func(func)\n return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def apply_func_to_select_indices(\n cls, axis, partitions, func, indices, keep_remaining=False\n ):\n \"\"\"\n Apply a function to select indices.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply the `func` over.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply to these indices of partitions.\n indices : dict\n The indices to apply the function to.\n keep_remaining : bool, default: False\n Whether or not to keep the other partitions. Some operations\n may want to drop the remaining partitions and keep\n only the results.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n Your internal function must take a kwarg `internal_indices` for\n this to work correctly. 
This prevents information leakage of the\n internal index to the external representation.\n \"\"\"\n if partitions.size == 0:\n return np.array([[]])\n # Handling dictionaries has to be done differently, but we still want\n # to figure out the partitions that need to be applied to, so we will\n # store the dictionary in a separate variable and assign `indices` to\n # the keys to handle it the same as we normally would.\n if isinstance(func, dict):\n dict_func = func\n else:\n dict_func = None\n if not axis:\n partitions_for_apply = partitions.T\n else:\n partitions_for_apply = partitions\n # We may have a command to perform different functions on different\n # columns at the same time. We attempt to handle this as efficiently as\n # possible here. Functions that use this in the dictionary format must\n # accept a keyword argument `func_dict`.\n if dict_func is not None:\n if not keep_remaining:\n result = np.array(\n [\n cls._apply_func_to_list_of_partitions(\n func,\n partitions_for_apply[o_idx],\n func_dict={\n i_idx: dict_func[i_idx]\n for i_idx in list_to_apply\n if i_idx >= 0\n },\n )\n for o_idx, list_to_apply in indices.items()\n ]\n )\n else:\n result = np.array(\n [\n partitions_for_apply[i]\n if i not in indices\n else cls._apply_func_to_list_of_partitions(\n func,\n partitions_for_apply[i],\n func_dict={\n idx: dict_func[idx] for idx in indices[i] if idx >= 0\n },\n )\n for i in range(len(partitions_for_apply))\n ]\n )\n else:\n if not keep_remaining:\n # We are passing internal indices in here. In order for func to\n # actually be able to use this information, it must be able to take in\n # the internal indices. This might mean an iloc in the case of Pandas\n # or some other way to index into the internal representation.\n result = np.array(\n [\n cls._apply_func_to_list_of_partitions(\n func,\n partitions_for_apply[idx],\n internal_indices=list_to_apply,\n )\n for idx, list_to_apply in indices.items()\n ]\n )\n else:\n # The difference here is that we modify a subset and return the\n # remaining (non-updated) blocks in their original position.\n result = np.array(\n [\n partitions_for_apply[i]\n if i not in indices\n else cls._apply_func_to_list_of_partitions(\n func, partitions_for_apply[i], internal_indices=indices[i]\n )\n for i in range(len(partitions_for_apply))\n ]\n )\n return result.T if not axis else result\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def apply_func_to_select_indices_along_full_axis(\n cls, axis, partitions, func, indices, keep_remaining=False\n ):\n \"\"\"\n Apply a function to a select subset of full columns/rows.\n\n Parameters\n ----------\n axis : {0, 1}\n The axis to apply the function over.\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply.\n indices : list-like\n The global indices to apply the func to.\n keep_remaining : bool, default: False\n Whether or not to keep the other partitions.\n Some operations may want to drop the remaining partitions and\n keep only the results.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n This should be used when you need to apply a function that relies\n on some global information for the entire column/row, but only need\n to apply a function to a subset.\n For your func to operate directly on the indices provided,\n it must use `internal_indices` as a keyword argument.\n \"\"\"\n if partitions.size == 0:\n return np.array([[]])\n # Handling dictionaries has to be done differently, but we 
still want\n # to figure out the partitions that need to be applied to, so we will\n # store the dictionary in a separate variable and assign `indices` to\n # the keys to handle it the same as we normally would.\n if isinstance(func, dict):\n dict_func = func\n else:\n dict_func = None\n preprocessed_func = cls.preprocess_func(func)\n # Since we might be keeping the remaining blocks that are not modified,\n # we have to also keep the block_partitions object in the correct\n # direction (transpose for columns).\n if not keep_remaining:\n selected_partitions = partitions.T if not axis else partitions\n selected_partitions = np.array([selected_partitions[i] for i in indices])\n selected_partitions = (\n selected_partitions.T if not axis else selected_partitions\n )\n else:\n selected_partitions = partitions\n if not axis:\n partitions_for_apply = cls.column_partitions(selected_partitions)\n partitions_for_remaining = partitions.T\n else:\n partitions_for_apply = cls.row_partitions(selected_partitions)\n partitions_for_remaining = partitions\n # We may have a command to perform different functions on different\n # columns at the same time. We attempt to handle this as efficiently as\n # possible here. Functions that use this in the dictionary format must\n # accept a keyword argument `func_dict`.\n if dict_func is not None:\n if not keep_remaining:\n result = np.array(\n [\n part.apply(\n preprocessed_func,\n func_dict={idx: dict_func[idx] for idx in indices[i]},\n )\n for i, part in zip(indices, partitions_for_apply)\n ]\n )\n else:\n result = np.array(\n [\n partitions_for_remaining[i]\n if i not in indices\n else cls._apply_func_to_list_of_partitions(\n preprocessed_func,\n partitions_for_apply[i],\n func_dict={idx: dict_func[idx] for idx in indices[i]},\n )\n for i in range(len(partitions_for_apply))\n ]\n )\n else:\n if not keep_remaining:\n # See notes in `apply_func_to_select_indices`\n result = np.array(\n [\n part.apply(preprocessed_func, internal_indices=indices[i])\n for i, part in zip(indices, partitions_for_apply)\n ]\n )\n else:\n # See notes in `apply_func_to_select_indices`\n result = np.array(\n [\n partitions_for_remaining[i]\n if i not in indices\n else partitions_for_apply[i].apply(\n preprocessed_func, internal_indices=indices[i]\n )\n for i in range(len(partitions_for_remaining))\n ]\n )\n return result.T if not axis else result\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def apply_func_to_indices_both_axis(\n cls,\n partitions,\n func,\n row_partitions_list,\n col_partitions_list,\n item_to_distribute=None,\n row_lengths=None,\n col_widths=None,\n ):\n \"\"\"\n Apply a function along both axes.\n\n Parameters\n ----------\n partitions : np.ndarray\n The partitions to which the `func` will apply.\n func : callable\n The function to apply.\n row_partitions_list : iterable of tuples\n Iterable of tuples, containing 2 values:\n 1. Integer row partition index.\n 2. Internal row indexer of this partition.\n col_partitions_list : iterable of tuples\n Iterable of tuples, containing 2 values:\n 1. Integer column partition index.\n 2. Internal column indexer of this partition.\n item_to_distribute : item, default: None\n The item to split up so it can be applied over both axes.\n row_lengths : list of ints, optional\n Lengths of partitions for every row. If not specified this information\n is extracted from partitions itself.\n col_widths : list of ints, optional\n Widths of partitions for every column. 
If not specified this information\n is extracted from partitions itself.\n\n Returns\n -------\n np.ndarray\n A NumPy array with partitions.\n\n Notes\n -----\n For your func to operate directly on the indices provided,\n it must use `row_internal_indices`, `col_internal_indices` as keyword\n arguments.\n \"\"\"\n partition_copy = partitions.copy()\n row_position_counter = 0\n\n if row_lengths is None:\n row_lengths = [None] * len(row_partitions_list)\n if col_widths is None:\n col_widths = [None] * len(col_partitions_list)\n\n def compute_part_size(indexer, remote_part, part_idx, axis):\n \"\"\"Compute indexer length along the specified axis for the passed partition.\"\"\"\n if isinstance(indexer, slice):\n shapes_container = row_lengths if axis == 0 else col_widths\n part_size = shapes_container[part_idx]\n if part_size is None:\n part_size = (\n remote_part.length() if axis == 0 else remote_part.width()\n )\n shapes_container[part_idx] = part_size\n indexer = range(*indexer.indices(part_size))\n return len(indexer)\n\n for row_idx, row_values in enumerate(row_partitions_list):\n row_blk_idx, row_internal_idx = row_values\n col_position_counter = 0\n for col_idx, col_values in enumerate(col_partitions_list):\n col_blk_idx, col_internal_idx = col_values\n remote_part = partition_copy[row_blk_idx, col_blk_idx]\n\n row_offset = compute_part_size(\n row_internal_idx, remote_part, row_idx, axis=0\n )\n col_offset = compute_part_size(\n col_internal_idx, remote_part, col_idx, axis=1\n )\n\n # We want to eventually make item_to_distribute an np.ndarray,\n # but that doesn't work for setting a subset of a categorical\n # column, as per https://github.com/modin-project/modin/issues/3736.\n # In that case, `item` is not an ndarray but instead some\n # categorical variable, which we we don't need to distribute\n # at all. 
Note that np.ndarray is not hashable, so it can't\n # be a categorical variable.\n # TODO(https://github.com/pandas-dev/pandas/issues/44703): Delete\n # this special case once the pandas bug is fixed.\n if item_to_distribute is not None:\n if isinstance(item_to_distribute, np.ndarray):\n item = item_to_distribute[\n row_position_counter : row_position_counter + row_offset,\n col_position_counter : col_position_counter + col_offset,\n ]\n else:\n item = item_to_distribute\n item = {\"item\": item}\n else:\n item = {}\n block_result = remote_part.add_to_apply_calls(\n func,\n row_internal_indices=row_internal_idx,\n col_internal_indices=col_internal_idx,\n **item,\n )\n partition_copy[row_blk_idx, col_blk_idx] = block_result\n col_position_counter += col_offset\n row_position_counter += row_offset\n return partition_copy\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def binary_operation(cls, axis, left, func, right):\n \"\"\"\n Apply a function that requires two PandasDataframe objects.\n\n Parameters\n ----------\n axis : {0, 1}\n The axis to apply the function over (0 - rows, 1 - columns).\n left : np.ndarray\n The partitions of left PandasDataframe.\n func : callable\n The function to apply.\n right : np.ndarray\n The partitions of right PandasDataframe.\n\n Returns\n -------\n np.ndarray\n A NumPy array with new partitions.\n \"\"\"\n if axis:\n left_partitions = cls.row_partitions(left)\n right_partitions = cls.row_partitions(right)\n else:\n left_partitions = cls.column_partitions(left)\n right_partitions = cls.column_partitions(right)\n func = cls.preprocess_func(func)\n result = np.array(\n [\n left_partitions[i].apply(\n func,\n num_splits=NPartitions.get(),\n other_axis_partition=right_partitions[i],\n )\n for i in range(len(left_partitions))\n ]\n )\n return result if axis else result.T\n\n @classmethod\n @wait_computations_if_benchmark_mode\n def finalize(cls, partitions):\n \"\"\"\n Perform all deferred calls on partitions.\n\n Parameters\n ----------\n partitions : np.ndarray\n Partitions of Modin Dataframe on which all deferred calls should be performed.\n \"\"\"\n [part.drain_call_queue() for row in partitions for part in row]\n\n @classmethod\n def rebalance_partitions(cls, partitions):\n \"\"\"\n Return the provided array of partitions without rebalancing it.\n\n Parameters\n ----------\n partitions : np.ndarray\n The 2-d array of partitions to rebalance.\n\n Returns\n -------\n np.ndarray\n The same 2-d array.\n \"\"\"\n return partitions\n" ]
[ [ "pandas.concat", "numpy.array" ], [ "pandas.concat", "pandas.DataFrame", "numpy.concatenate", "numpy.append", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
binzh93/EAST
[ "b5f66ab1a5dd37b6a5134336d494000e1add6da1" ]
[ "train.py" ]
[ "import torch\nfrom torch.utils import data\nfrom torch import nn\nfrom torch.optim import lr_scheduler\nfrom dataset import custom_dataset\nfrom model import EAST\nfrom loss import Loss\nimport os\nimport time\nimport numpy as np\n\n\ndef train(train_img_path, train_gt_path, pths_path, batch_size, lr, num_workers, epoch_iter, interval):\n\tfile_num = len(os.listdir(train_img_path))\n\ttrainset = custom_dataset(train_img_path, train_gt_path)\n\ttrain_loader = data.DataLoader(trainset, batch_size=batch_size, \\\n shuffle=True, num_workers=num_workers, drop_last=True)\n\t\n\tcriterion = Loss()\n\tdevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\t# model = EAST()\n\tmodel = EAST(pretrained=False)\n\tdata_parallel = False\n\tif torch.cuda.device_count() > 1:\n\t\tmodel = nn.DataParallel(model)\n\t\tdata_parallel = True\n\tmodel.to(device)\n\toptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\tscheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[epoch_iter//2], gamma=0.1)\n\n\tfor epoch in range(epoch_iter):\t\n\t\tmodel.train()\n\t\tscheduler.step()\n\t\tepoch_loss = 0\n\t\tepoch_time = time.time()\n\t\tfor i, (img, gt_score, gt_geo, ignored_map) in enumerate(train_loader):\n\t\t\tstart_time = time.time()\n\t\t\timg, gt_score, gt_geo, ignored_map = img.to(device), gt_score.to(device), gt_geo.to(device), ignored_map.to(device)\n\t\t\tpred_score, pred_geo = model(img)\n\t\t\tloss = criterion(gt_score, pred_score, gt_geo, pred_geo, ignored_map)\n\t\t\t\n\t\t\tepoch_loss += loss.item()\n\t\t\toptimizer.zero_grad()\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tprint('Epoch is [{}/{}], mini-batch is [{}/{}], time consumption is {:.8f}, batch_loss is {:.8f}'.format(\\\n epoch+1, epoch_iter, i+1, int(file_num/batch_size), time.time()-start_time, loss.item()))\n\t\t\n\t\tprint('epoch_loss is {:.8f}, epoch_time is {:.8f}'.format(epoch_loss/int(file_num/batch_size), time.time()-epoch_time))\n\t\tprint(time.asctime(time.localtime(time.time())))\n\t\tprint('='*50)\n\t\tif (epoch + 1) % interval == 0:\n\t\t\tstate_dict = model.module.state_dict() if data_parallel else model.state_dict()\n\t\t\ttorch.save(state_dict, os.path.join(pths_path, 'model_epoch_{}.pth'.format(epoch+1)))\n\n\nif __name__ == '__main__':\n\t# train_img_path = os.path.abspath('../ICDAR_2015/train_img')\n\t# train_gt_path = os.path.abspath('../ICDAR_2015/train_gt')\n\n\t# train_img_path = os.path.abspath('/workspace/mnt/storage/zhubin/track/ocr/ICDAR13')\n\t# train_gt_path = os.path.abspath('/workspace/mnt/storage/zhubin/track/ocr/ICDAR13_gt')\n\n\ttrain_img_path = os.path.abspath('/workspace/mnt/storage/zhubin/track/ocr/id5/train/img/')\n\ttrain_gt_path = os.path.abspath('/workspace/mnt/storage/zhubin/track/ocr/id5/train/gt/')\n\n\tpths_path = './pths'\n\tbatch_size = 64 # 24 \n\tlr = 1e-3\n\tnum_workers = 4\n\tepoch_iter = 600\n\tsave_interval = 5\n\ttrain(train_img_path, train_gt_path, pths_path, batch_size, lr, num_workers, epoch_iter, save_interval)\t\n\t\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.utils.data.DataLoader", "torch.nn.DataParallel", "torch.cuda.is_available", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LiyouZhou/CenterPoint
[ "b57dac03c4044cf19f6e7dcbeebae8e0befb5abb" ]
[ "det3d/torchie/parallel/collate.py" ]
[ "import collections\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data.dataloader import default_collate\n\nfrom .data_container import DataContainer\n\n\ndef collate(batch, samples_per_gpu=1):\n \"\"\"Puts each data field into a tensor/DataContainer with outer dimension\n batch size.\n\n Extend default_collate to add support for\n :type:`~torchie.parallel.DataContainer`. There are 3 cases.\n\n 1. cpu_only = True, e.g., meta data\n 2. cpu_only = False, stack = True, e.g., images tensors\n 3. cpu_only = False, stack = False, e.g., gt bboxes\n \"\"\"\n\n if not isinstance(batch, collections.Sequence):\n raise TypeError(\"{} is not supported.\".format(batch.dtype))\n\n if isinstance(batch[0], DataContainer):\n assert len(batch) % samples_per_gpu == 0\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True\n )\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i : i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(\n max_shape[dim - 1], sample.size(-dim)\n )\n padded_samples = []\n for sample in batch[i : i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value)\n )\n stacked.append(default_collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n default_collate(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n )\n else:\n raise ValueError(\"pad_dims should be either None or integers (1-3)\")\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n elif isinstance(batch[0], collections.Mapping):\n return {\n key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]\n }\n else:\n return default_collate(batch)\n\n\n\ndef collate_kitti(batch_list, samples_per_gpu=1):\n example_merged = collections.defaultdict(list)\n for example in batch_list:\n if type(example) is list:\n for subexample in example:\n for k, v in subexample.items():\n example_merged[k].append(v)\n else:\n for k, v in example.items():\n example_merged[k].append(v)\n batch_size = len(example_merged['metadata'])\n ret = {}\n # voxel_nums_list = example_merged[\"num_voxels\"]\n # example_merged.pop(\"num_voxels\")\n for key, elems in example_merged.items():\n if key in [\"voxels\", \"num_points\", \"num_gt\", \"voxel_labels\", \"num_voxels\"]:\n ret[key] = torch.tensor(np.concatenate(elems, axis=0))\n elif key in [\n \"gt_boxes\",\n ]:\n 
task_max_gts = []\n for task_id in range(len(elems[0])):\n max_gt = 0\n for k in range(batch_size):\n max_gt = max(max_gt, len(elems[k][task_id]))\n task_max_gts.append(max_gt)\n res = []\n for idx, max_gt in enumerate(task_max_gts):\n batch_task_gt_boxes3d = np.zeros((batch_size, max_gt, 7))\n for i in range(batch_size):\n batch_task_gt_boxes3d[i, : len(elems[i][idx]), :] = elems[i][idx]\n res.append(batch_task_gt_boxes3d)\n ret[key] = res\n elif key == \"metadata\":\n ret[key] = elems\n elif key == \"calib\":\n ret[key] = {}\n for elem in elems:\n for k1, v1 in elem.items():\n if k1 not in ret[key]:\n ret[key][k1] = [v1]\n else:\n ret[key][k1].append(v1)\n for k1, v1 in ret[key].items():\n ret[key][k1] = torch.tensor(np.stack(v1, axis=0))\n elif key in [\"coordinates\", \"points\"]:\n coors = []\n for i, coor in enumerate(elems):\n coor_pad = np.pad(\n coor, ((0, 0), (1, 0)), mode=\"constant\", constant_values=i\n )\n coors.append(coor_pad)\n ret[key] = torch.tensor(np.concatenate(coors, axis=0))\n elif key in [\"anchors\", \"anchors_mask\", \"reg_targets\", \"reg_weights\", \"labels\", \"hm\", \"anno_box\",\n \"ind\", \"mask\", \"cat\"]:\n\n ret[key] = defaultdict(list)\n res = []\n for elem in elems:\n for idx, ele in enumerate(elem):\n ret[key][str(idx)].append(torch.tensor(ele))\n for kk, vv in ret[key].items():\n res.append(torch.stack(vv))\n ret[key] = res\n else:\n ret[key] = np.stack(elems, axis=0)\n\n return ret\n" ]
[ [ "numpy.pad", "numpy.stack", "torch.tensor", "numpy.concatenate", "torch.stack", "numpy.zeros", "torch.nn.functional.pad", "torch.utils.data.dataloader.default_collate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pengaoao/wenet
[ "73e6503620df404184e0f2f01d85c1bb1d6c3b07", "73e6503620df404184e0f2f01d85c1bb1d6c3b07" ]
[ "tools/compute_cmvn_stats.py", "wenet/transformer/embedding.py" ]
[ "#!/usr/bin/env python3\n# encoding: utf-8\n\nimport sys\nimport argparse\nimport json\nimport codecs\nimport yaml\n\nimport torch\nimport torchaudio\nimport torchaudio.compliance.kaldi as kaldi\nfrom torch.utils.data import Dataset, DataLoader\n\ntorchaudio.set_audio_backend(\"sox_io\")\n\n\nclass CollateFunc(object):\n ''' Collate function for AudioDataset\n '''\n def __init__(self, feat_dim, resample_rate):\n self.feat_dim = feat_dim\n self.resample_rate = resample_rate\n pass\n\n def __call__(self, batch):\n mean_stat = torch.zeros(self.feat_dim)\n var_stat = torch.zeros(self.feat_dim)\n number = 0\n for item in batch:\n value = item[1].strip().split(\",\")\n assert len(value) == 3 or len(value) == 1\n wav_path = value[0]\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_path).sample_rate\n resample_rate = sample_rate\n # len(value) == 3 means segmented wav.scp,\n # len(value) == 1 means original wav.scp\n if len(value) == 3:\n start_frame = int(float(value[1]) * sample_rate)\n end_frame = int(float(value[2]) * sample_rate)\n waveform, sample_rate = torchaudio.backend.sox_io_backend.load(\n filepath=wav_path,\n num_frames=end_frame - start_frame,\n frame_offset=start_frame)\n else:\n waveform, sample_rate = torchaudio.load(item[1])\n\n waveform = waveform * (1 << 15)\n if self.resample_rate != 0 and self.resample_rate != sample_rate:\n resample_rate = self.resample_rate\n waveform = torchaudio.transforms.Resample(\n orig_freq=sample_rate, new_freq=resample_rate)(waveform)\n\n mat = kaldi.fbank(waveform,\n num_mel_bins=self.feat_dim,\n dither=0.0,\n energy_floor=0.0,\n sample_frequency=resample_rate)\n mean_stat += torch.sum(mat, axis=0)\n var_stat += torch.sum(torch.square(mat), axis=0)\n number += mat.shape[0]\n return number, mean_stat, var_stat\n\n\nclass AudioDataset(Dataset):\n def __init__(self, data_file):\n self.items = []\n with codecs.open(data_file, 'r', encoding='utf-8') as f:\n for line in f:\n arr = line.strip().split()\n self.items.append((arr[0], arr[1]))\n\n def __len__(self):\n return len(self.items)\n\n def __getitem__(self, idx):\n return self.items[idx]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='extract CMVN stats')\n parser.add_argument('--num_workers',\n default=0,\n type=int,\n help='num of subprocess workers for processing')\n parser.add_argument('--train_config',\n default='',\n help='training yaml conf')\n parser.add_argument('--in_scp', default=None, help='wav scp file')\n parser.add_argument('--out_cmvn',\n default='global_cmvn',\n help='global cmvn file')\n\n args = parser.parse_args()\n\n with open(args.train_config, 'r') as fin:\n configs = yaml.load(fin, Loader=yaml.FullLoader)\n feat_dim = configs['collate_conf']['feature_extraction_conf']['mel_bins']\n resample_rate = 0\n if 'resample' in configs['collate_conf']['feature_extraction_conf']:\n resample_rate = configs['collate_conf']['feature_extraction_conf']['resample']\n print('using resample and new sample rate is {}'.format(resample_rate))\n\n collate_func = CollateFunc(feat_dim, resample_rate)\n dataset = AudioDataset(args.in_scp)\n batch_size = 20\n data_loader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=True,\n sampler=None,\n num_workers=args.num_workers,\n collate_fn=collate_func)\n\n with torch.no_grad():\n all_number = 0\n all_mean_stat = torch.zeros(feat_dim)\n all_var_stat = torch.zeros(feat_dim)\n wav_number = 0\n for i, batch in enumerate(data_loader):\n number, mean_stat, var_stat = batch\n all_mean_stat += mean_stat\n 
all_var_stat += var_stat\n all_number += number\n wav_number += batch_size\n if wav_number % 1000 == 0:\n print(f'processed {wav_number} wavs, {all_number} frames',\n file=sys.stderr,\n flush=True)\n\n cmvn_info = {\n 'mean_stat': list(all_mean_stat.tolist()),\n 'var_stat': list(all_var_stat.tolist()),\n 'frame_num': all_number\n }\n\n with open(args.out_cmvn, 'w') as fout:\n fout.write(json.dumps(cmvn_info))\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Mobvoi Inc. All Rights Reserved.\n# Author: [email protected] (DI WU)\n\"\"\"Positonal Encoding Module.\"\"\"\n\nimport math\nfrom typing import Tuple\n\nimport torch\nfrom wenet.transformer.slice_helper import slice_helper2\n\n\nclass PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n :param int d_model: embedding dim\n :param float dropout_rate: dropout rate\n :param int max_len: maximum input length\n\n PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))\n PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))\n \"\"\"\n def __init__(self,\n d_model: int,\n dropout_rate: float,\n max_len: int = 5000,\n reverse: bool = False):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super().__init__()\n self.d_model = d_model\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.max_len = max_len\n\n self.pe = torch.zeros(self.max_len, self.d_model)\n position = torch.arange(0, self.max_len,\n dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32) *\n -(math.log(10000.0) / self.d_model))\n self.pe[:, 0::2] = torch.sin(position * div_term)\n self.pe[:, 1::2] = torch.cos(position * div_term)\n self.pe = self.pe.unsqueeze(0)\n\n def forward(self,\n x: torch.Tensor,\n offset: torch.Tensor = torch.tensor(0),\n onnx_mode: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input. Its shape is (batch, time, ...)\n offset (int): position offset\n\n Returns:\n torch.Tensor: Encoded tensor. 
Its shape is (batch, time, ...)\n torch.Tensor: for compatibility to RelPositionalEncoding\n \"\"\"\n # assert offset + x.size(1) < self.max_len\n self.pe = self.pe.to(x.device)\n pos_emb = self.pe[:, offset:offset + x.size(1)]\n x = x * self.xscale + pos_emb\n return self.dropout(x), self.dropout(pos_emb)\n\n def position_encoding(self, \n offset: torch.Tensor, \n size: torch.Tensor,\n onnx_mode: bool = False,\n ) -> torch.Tensor:\n \"\"\" For getting encoding in a streaming fashion\n\n Attention!!!!!\n we apply dropout only once at the whole utterance level in a none\n streaming way, but will call this function several times with\n increasing input size in a streaming scenario, so the dropout will\n be applied several times.\n\n Args:\n offset (int): start offset\n size (int): requried size of position encoding\n\n Returns:\n torch.Tensor: Corresponding encoding\n \"\"\"\n assert offset + size < self.max_len\n if onnx_mode:\n # pe = self.pe[:, offset:offset + size]\n return slice_helper2(self.pe, offset, offset + size)\n else:\n pe = self.pe[:, offset:offset + size]\n return self.dropout(pe)\n\n\nclass RelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module.\n See : Appendix B in https://arxiv.org/abs/1901.02860\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n \"\"\"\n def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model, dropout_rate, max_len, reverse=True)\n\n def forward(self,\n x: torch.Tensor,\n offset: torch.Tensor,\n onnx_mode: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute positional encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n \"\"\"\n # assert offset + x.size(1) < self.max_len\n self.pe = self.pe.to(x.device)\n x = x * self.xscale\n if onnx_mode:\n # end = offset.item() + x.size(1)\n # pos_emb = torch.index_select(self.pe, 1, torch.tensor(range(x.size(1))))\n pos_emb = slice_helper2(self.pe, offset, offset + x.size(1))\n # pos_emb = slice_helper3(pos_emb, x.size(1))\n else:\n pos_emb = self.pe[:, offset:offset + x.size(1)]\n return self.dropout(x), self.dropout(pos_emb)\n\n\nclass NoPositionalEncoding(torch.nn.Module):\n \"\"\" No position encoding\n \"\"\"\n def __init__(self, d_model: int, dropout_rate: float):\n super().__init__()\n self.d_model = d_model\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n\n def forward(self,\n x: torch.Tensor,\n offset: int = 0) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" Just return zero vector for interface compatibility\n \"\"\"\n pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)\n return self.dropout(x), pos_emb\n\n def position_encoding(self, offset: int, size: int) -> torch.Tensor:\n return torch.zeros(1, size, self.d_model)\n" ]
[ [ "torch.zeros", "torch.sum", "torch.utils.data.DataLoader", "torch.no_grad", "torch.square" ], [ "torch.nn.Dropout", "torch.sin", "torch.zeros", "torch.tensor", "torch.arange", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gchrupala/visually-grounded-speech
[ "1c4b173f616596c044e827501515e43ecd2516b4" ]
[ "analysis/analyze.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nimport imaginet.simple_data as sd\nimport imaginet.data_provider as dp\nimport imaginet.vendrov_provider as vendrov\nimport imaginet.experiment as E\nimport imaginet.defn.audiovis_rhn as Speech\nimport imaginet.defn.visual2_rhn as Text\nimport imaginet.task\nimport numpy\nimport sys\nimport argparse\nimport logging\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.preprocessing import normalize\n\ndef main():\n logging.getLogger().setLevel('INFO')\n parser = argparse.ArgumentParser()\n commands = parser.add_subparsers()\n retrievalp = commands.add_parser('retrieval')\n retrievalp.set_defaults(func=retrieval)\n errorsp = commands.add_parser('errors')\n errorsp.set_defaults(func=errors)\n homonymsp = commands.add_parser('homonyms')\n homonymsp.set_defaults(func=homonyms)\n args = parser.parse_args()\n args.func(args)\n\ndef retrieval(args):\n\n print(\"model r@1 r@5 r@10 rank\")\n print(\"flick8k-speech {:.3f} {:.3f} {:.3f} {}\".format(*scores(flickr8k_speech())))\n print(\"flickr8k-text {:.3f} {:.3f} {:.3f} {}\".format(*scores(flickr8k_text())))\n print(\"coco-speech {:.3f} {:.3f} {:.3f} {}\".format(*scores(coco_speech())))\n print(\"coco-text {:.3f} {:.3f} {:.3f} {}\".format(*scores(coco_text())))\n\ndef errors(args):\n import pandas as pd\n import json\n prov = vendrov.getDataProvider(dataset='coco', root='..', audio_kind=None)\n sent = list(prov.iterSentences(split='val'))\n\n def extreme(good, worse):\n ratio = numpy.array(good['ranks']) / numpy.array(worse['ranks'])\n return numpy.argsort(ratio)\n def extreme_stats(good, worse, N=100):\n J = extreme(good, worse)[:N]\n L = [len(sent[j]['tokens']) for j in J]\n R = [good['ranks'][j] / worse['ranks'][j] for j in J]\n return (L, R)\n logging.info(\"Computing scores on validation data\")\n score_w = coco_text(split='val')\n score_s = coco_speech(split='val')\n Lw,Rw = extreme_stats(score_w, score_s)\n Ls,Rs = extreme_stats(score_s, score_w)\n data = pd.DataFrame(dict(Length=numpy.hstack([Lw,Ls]),\n better=numpy.hstack([numpy.repeat(\"text\",100),\n numpy.repeat(\"speech\",100)])))\n logging.info(\"Writing results to error-length.txt\")\n with open(\"error-length.txt\",\"w\") as f:\n f.write(data.to_csv(index=False))\n\ndef homonyms(args):\n\n logging.info(\"Loading data\")\n homonym = [ line.split() for line in open(\"../data/coco/homonym.txt\")]\n prov = vendrov.getDataProvider(dataset='coco', root='..', audio_kind='mfcc')\n sent = list(prov.iterSentences(split='train')) + list(prov.iterSentences(split='val'))\n logging.info(\"Loading model\")\n model = imaginet.task.load(\"../models/coco-speech.zip\")\n def input_mfcc(sent):\n return [ sent_i['audio'].mean(axis=0) for sent_i in sent ]\n def embed(sent):\n return Speech.encode_sentences(model, [ sent_i['audio'] for sent_i in sent ])\n logging.info(\"Testing on I/O layers\")\n with open(\"ambigu-io.txt\", \"w\") as out:\n print(\"word1 word2 io count1 count2 majority acc\", file=out)\n for H in homonym:\n logging.info(\"Testing homonym {}\".format(H))\n r = test_homonym(H, sent, input_mfcc)\n for acc in r['kfold_acc']:\n print(\" \".join(H), \"input\", r['word1_count'], r['word2_count'], r['majority'], acc, file=out)\n r = test_homonym(H, sent, embed)\n for acc in r['kfold_acc']:\n print(\" \".join(H), \"output\", r['word1_count'], r['word2_count'], r['majority'], acc, file=out)\n out.flush()\n logging.info(\"Written results to ambigu-io.txt\")\n 
logging.info(\"Testing on recurrent layers\")\n with open(\"ambigu-layerwise.txt\", \"w\") as out:\n print(\"word1 word2 layer count1 count2 majority acc\", file=out)\n for H in homonym:\n logging.info(\"Testing homonym {}\".format(H))\n for layer in range(5):\n feat = lambda x: mean_layer(x, model, layer=layer)\n r = test_homonym(H, sent, feat)\n for acc in r['kfold_acc']:\n print(\" \".join(H), layer, r['word1_count'], r['word2_count'], r['majority'], acc, file=out)\n out.flush()\n logging.info(\"Written results to ambigu-layerwise.txt\")\n\ndef matching(sent, word):\n for sent_i in sent:\n if word in sent_i['tokens']:\n yield sent_i\n\ndef test_homonym(H, sent, features, C=1.0):\n X_0 = features(matching(sent, H[0]))\n X_1 = features(matching(sent, H[1]))\n y_0 = numpy.zeros(len(X_0))\n y_1 = numpy.ones(len(X_1))\n X = normalize(numpy.vstack([X_0, X_1]), norm='l2')\n y = numpy.hstack([y_0, y_1])\n classifier = LogisticRegression(C=C)\n fold = StratifiedKFold(y, n_folds=10)\n score = []\n count = []\n for tr, te in fold:\n X_tr, X_te = X[tr], X[te]\n y_tr, y_te = y[tr], y[te]\n classifier.fit(X_tr, y_tr)\n score.append(sum(classifier.predict(X_te) == y_te))\n count.append(len(y_te))\n score = numpy.array(score, dtype='float')\n count = numpy.array(count, dtype='float')\n result = {'word1_count': len(y_0),\n 'word2_count': len(y_1),\n 'majority': 1.0 * max(len(y_0),len(y_1))/len(y),\n 'kfold_acc': score/count }\n return result\n\nCACHE = {}\ndef mean_layer(sent, model, layer=0):\n sent = list(sent)\n if len(CACHE) > 5:\n CACHE.clear()\n key = '\\n'.join([ sent_i['raw'] for sent_i in sent ])\n if key in CACHE:\n return [ datum[:,layer,:].mean(axis=0) for datum in CACHE[key] ]\n else:\n data = Speech.layer_states(model, [ sent_i['audio'] for sent_i in sent ])\n CACHE[key] = data\n result = [ datum[:,layer,:].mean(axis=0) for datum in data ]\n return result\n\n\ndef flickr8k_speech(split='test'):\n batch_size = 32\n prov = dp.getDataProvider('flickr8k', root='..', audio_kind='human.max1K.accel3.ord.mfcc')\n data = sd.SimpleData(prov, min_df=10, scale=False,\n batch_size=batch_size, shuffle=True)\n result = E.evaluate(prov, model_path=\"../models/flickr8k-speech.zip\",\n task=Speech.Visual,\n encode_sentences=Speech.encode_sentences,\n tokenize=audio,\n split=split,\n batch_size=batch_size)\n return result\n\ndef flickr8k_text(split='test'):\n batch_size = 32\n prov = dp.getDataProvider('flickr8k', root='..', audio_kind=None)\n data = sd.SimpleData(prov, min_df=1, scale=False,\n batch_size=batch_size, shuffle=True, tokenize=sd.words,\n val_vocab=True)\n result = E.evaluate(prov, model_path=\"../models/flickr8k-text.zip\",\n task=Text.Visual,\n encode_sentences=Text.encode_sentences,\n tokenize=sd.words,\n split=split,\n batch_size=batch_size)\n return result\n\ndef coco_speech(split='test'):\n batch_size = 32\n prov = vendrov.getDataProvider('coco', root='..', audio_kind='mfcc')\n data = sd.SimpleData(prov, min_df=10, scale=False,\n batch_size=batch_size, shuffle=True)\n result = E.evaluate(prov, model_path=\"../models/coco-speech.zip\",\n task=Speech.Visual,\n encode_sentences=Speech.encode_sentences,\n tokenize=audio,\n split=split,\n batch_size=batch_size)\n return result\n\ndef coco_text(split='test'):\n batch_size = 128\n prov = vendrov.getDataProvider('coco', root='..', audio_kind=None)\n data = sd.SimpleData(prov, min_df=1, scale=False,\n batch_size=batch_size, shuffle=True, tokenize=sd.words,\n val_vocab=True)\n result = E.evaluate(prov, model_path=\"../models/coco-text.zip\",\n 
task=Text.Visual,\n encode_sentences=Text.encode_sentences,\n tokenize=sd.words,\n split=split,\n batch_size=batch_size)\n return result\n\n\ndef audio(sent):\n return sent['audio']\n\ndef scores(data):\n return (numpy.mean(data['recall'][1]), \\\n numpy.mean(data['recall'][5]),\\\n numpy.mean(data['recall'][10]),\\\n numpy.median(data['ranks']))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.hstack", "sklearn.cross_validation.StratifiedKFold", "sklearn.linear_model.LogisticRegression", "numpy.median", "numpy.mean", "numpy.argsort", "numpy.repeat", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mariusud/SqueezeSegV3
[ "893a3efd41e11761f3e83d570192cb8e9d1439b9" ]
[ "src/tasks/semantic/modules/segmentator.py" ]
[ "#!/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\n\nimport imp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tasks.semantic.postproc.CRF import CRF\nimport __init__ as booger\n\nclass Segmentator(nn.Module):\n def __init__(self, ARCH, nclasses, path=None, path_append=\"\", strict=False):\n super().__init__()\n self.ARCH = ARCH\n self.nclasses = nclasses\n self.path = path\n self.path_append = path_append\n self.strict = False\n \n bboneModule = imp.load_source(\"bboneModule\",\n booger.TRAIN_PATH + '/backbones/' +\n self.ARCH[\"backbone\"][\"name\"] + '.py')\n self.backbone = bboneModule.Backbone(params=self.ARCH[\"backbone\"])\n\n # do a pass of the backbone to initialize the skip connections\n xyz = torch.zeros((1, 3, \n self.ARCH['dataset']['sensor']['img_prop']['height'],\n self.ARCH['dataset']['sensor']['img_prop']['width']))\n stub = torch.zeros((1,\n self.backbone.get_input_depth(),\n self.ARCH[\"dataset\"][\"sensor\"][\"img_prop\"][\"height\"],\n self.ARCH[\"dataset\"][\"sensor\"][\"img_prop\"][\"width\"]))\n\n if torch.cuda.is_available():\n stub = stub.cuda()\n xyz = xyz.cuda()\n self.backbone.cuda()\n _, stub_skips = self.backbone(stub)\n\n decoderModule = imp.load_source(\"decoderModule\",\n booger.TRAIN_PATH + '/tasks/semantic/decoders/' +\n self.ARCH[\"decoder\"][\"name\"] + '.py')\n self.decoder = decoderModule.Decoder(params=self.ARCH[\"decoder\"],\n stub_skips=stub_skips,\n OS=self.ARCH[\"backbone\"][\"OS\"],\n feature_depth=self.backbone.get_last_depth())\n\n self.head1 = nn.Sequential(nn.Dropout2d(p=ARCH[\"head\"][\"dropout\"]),\n nn.Conv2d(256,\n self.nclasses, kernel_size=1,\n stride=1, padding=0))\n\n self.head2 = nn.Sequential(nn.Dropout2d(p=ARCH[\"head\"][\"dropout\"]),\n nn.Conv2d(256,\n self.nclasses, kernel_size=1,\n stride=1, padding=0))\n\n self.head3 = nn.Sequential(nn.Dropout2d(p=ARCH[\"head\"][\"dropout\"]),\n nn.Conv2d(128,\n self.nclasses, kernel_size=1,\n stride=1, padding=0))\n\n self.head4 = nn.Sequential(nn.Dropout2d(p=ARCH[\"head\"][\"dropout\"]),\n nn.Conv2d(64,\n self.nclasses, kernel_size=1,\n stride=1, padding=0))\n self.head5 = nn.Sequential(nn.Dropout2d(p=ARCH[\"head\"][\"dropout\"]),\n nn.Conv2d(32,\n self.nclasses, kernel_size=3,\n stride=1, padding=1))\n\n\n\n if self.ARCH[\"post\"][\"CRF\"][\"use\"]:\n self.CRF = CRF(self.ARCH[\"post\"][\"CRF\"][\"params\"], self.nclasses)\n else:\n self.CRF = None\n\n # train backbone?\n if not self.ARCH[\"backbone\"][\"train\"]:\n for w in self.backbone.parameters():\n w.requires_grad = False\n\n # train decoder?\n if not self.ARCH[\"decoder\"][\"train\"]:\n for w in self.decoder.parameters():\n w.requires_grad = False\n\n # train head?\n if not self.ARCH[\"head\"][\"train\"]:\n for w in self.head.parameters():\n w.requires_grad = False\n\n # train CRF?\n if self.CRF and not self.ARCH[\"post\"][\"CRF\"][\"train\"]:\n for w in self.CRF.parameters():\n w.requires_grad = False\n\n # print number of parameters and the ones requiring gradients\n # print number of parameters and the ones requiring gradients\n weights_total = sum(p.numel() for p in self.parameters())\n weights_grad = sum(p.numel() for p in self.parameters() if p.requires_grad)\n print(\"Total number of parameters: \", weights_total)\n print(\"Total number of parameters requires_grad: \", weights_grad)\n\n # breakdown by layer\n weights_enc = sum(p.numel() for p in self.backbone.parameters())\n weights_dec = sum(p.numel() for p in self.decoder.parameters())\n 
weights_head = sum(p.numel() for p in self.head1.parameters())+\\\n sum(p.numel() for p in self.head2.parameters())+\\\n sum(p.numel() for p in self.head3.parameters())+\\\n sum(p.numel() for p in self.head4.parameters())+\\\n sum(p.numel() for p in self.head5.parameters())\n print(\"Param encoder \", weights_enc)\n print(\"Param decoder \", weights_dec)\n print(\"Param head \", weights_head)\n if self.CRF:\n weights_crf = sum(p.numel() for p in self.CRF.parameters())\n print(\"Param CRF \", weights_crf)\n\n # get weights\n if path is not None:\n # try backbone\n try:\n w_dict = torch.load(path + \"/backbone\",\n map_location=lambda storage, loc: storage)\n self.backbone.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model backbone weights\")\n except Exception as e:\n print()\n print(\"Couldn't load backbone, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n\n # try decoder\n try:\n w_dict = torch.load(path + \"/segmentation_decoder\",\n map_location=lambda storage, loc: storage)\n self.decoder.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model decoder weights\")\n except Exception as e:\n print(\"Couldn't load decoder, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n\n # try head\n try:\n print(path_append+'./segmentation_head1')\n w_dict = torch.load(path + \"/segmentation_head1\",\n map_location=lambda storage, loc: storage)\n self.head1.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model head weights\")\n except Exception as e:\n print(\"Couldn't load head, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n try:\n w_dict = torch.load(path+ \"/segmentation_head2\",\n map_location=lambda storage, loc: storage)\n self.head2.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model head weights\")\n except Exception as e:\n print(\"Couldn't load head, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n try:\n w_dict = torch.load(path + \"/segmentation_head3\",\n map_location=lambda storage, loc: storage)\n self.head3.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model head weights\")\n except Exception as e:\n print(\"Couldn't load head, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n\n try:\n w_dict = torch.load(path+ \"/segmentation_head4\",\n map_location=lambda storage, loc: storage)\n self.head4.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model head weights\")\n except Exception as e:\n print(\"Couldn't load head, using random weights. Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n\n try:\n w_dict = torch.load(path + \"/segmentation_head5\",\n map_location=lambda storage, loc: storage)\n self.head5.load_state_dict(w_dict, strict=True)\n print(\"Successfully loaded model head weights\")\n except Exception as e:\n print(\"Couldn't load head, using random weights. 
Error: \", e)\n if strict:\n print(\"I'm in strict mode and failure to load weights blows me up :)\")\n raise e\n else:\n print(\"No path to pretrained, using random init.\")\n\n def forward(self, x, mask=None):\n\n feature, skips = self.backbone(x)\n\n y = self.decoder(feature, skips)\n \n z1 = self.head5(y[0])\n z1 = F.softmax(z1,dim=1)\n\n z2 = self.head4(y[1])\n z2 = F.softmax(z2,dim=1)\n\n z3 = self.head3(y[2])\n z3 = F.softmax(z3,dim=1)\n\n z4 = self.head2(y[3])\n z4 = F.softmax(z4,dim=1)\n\n z5 = self.head1(y[4])\n z5 = F.softmax(z5,dim=1)\n\n return [z1, z2, z3, z4, z5]\n\n def save_checkpoint(self, logdir, suffix=\"\"):\n # Save the weights\n torch.save(self.backbone.state_dict(), logdir +\n \"/backbone\" + suffix)\n \n torch.save(self.decoder.state_dict(), logdir +\n \"/segmentation_decoder\" + suffix)\n\n torch.save(self.head1.state_dict(),logdir+\"/segmentation_head1\"+suffix)\n torch.save(self.head2.state_dict(),logdir+\"/segmentation_head2\"+suffix)\n torch.save(self.head3.state_dict(),logdir+\"/segmentation_head3\"+suffix)\n torch.save(self.head4.state_dict(),logdir+\"/segmentation_head4\"+suffix)\n torch.save(self.head5.state_dict(),logdir+\"/segmentation_head5\"+suffix)\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.Dropout2d", "torch.zeros", "torch.load", "torch.nn.Conv2d", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HibrahimBoz/FingerprintFeatureExtraction
[ "68554a9e1ca3961c6fea655832905d71e9f9139f" ]
[ "src/ridge_filter.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 22 03:15:03 2016\r\n\r\n@author: utkarsh\r\n\"\"\"\r\n\r\n\r\n# RIDGEFILTER - enhances fingerprint image via oriented filters\r\n#\r\n# Function to enhance fingerprint image via oriented filters\r\n#\r\n# Usage:\r\n# newim = ridgefilter(im, orientim, freqim, kx, ky, showfilter)\r\n#\r\n# Arguments:\r\n# im - Image to be processed.\r\n# orientim - Ridge orientation image, obtained from RIDGEORIENT.\r\n# freqim - Ridge frequency image, obtained from RIDGEFREQ.\r\n# kx, ky - Scale factors specifying the filter sigma relative\r\n# to the wavelength of the filter. This is done so\r\n# that the shapes of the filters are invariant to the\r\n# scale. kx controls the sigma in the x direction\r\n# which is along the filter, and hence controls the\r\n# bandwidth of the filter. ky controls the sigma\r\n# across the filter and hence controls the\r\n# orientational selectivity of the filter. A value of\r\n# 0.5 for both kx and ky is a good starting point.\r\n# showfilter - An optional flag 0/1. When set an image of the\r\n# largest scale filter is displayed for inspection.\r\n# \r\n# Returns:\r\n# newim - The enhanced image\r\n#\r\n# See also: RIDGEORIENT, RIDGEFREQ, RIDGESEGMENT\r\n\r\n# Reference: \r\n# Hong, L., Wan, Y., and Jain, A. K. Fingerprint image enhancement:\r\n# Algorithm and performance evaluation. IEEE Transactions on Pattern\r\n# Analysis and Machine Intelligence 20, 8 (1998), 777 789.\r\n\r\n### REFERENCES\r\n\r\n# Peter Kovesi \r\n# School of Computer Science & Software Engineering\r\n# The University of Western Australia\r\n# pk at csse uwa edu au\r\n# http://www.csse.uwa.edu.au/~pk\r\n\r\n\r\n\r\nimport numpy as np\r\nimport scipy;\r\ndef ridge_filter(im, orient, freq, kx, ky):\r\n angleInc = 3;\r\n im = np.double(im);\r\n rows,cols = im.shape;\r\n newim = np.zeros((rows,cols));\r\n \r\n freq_1d = np.reshape(freq,(1,rows*cols));\r\n ind = np.where(freq_1d>0);\r\n \r\n ind = np.array(ind);\r\n ind = ind[1,:]; \r\n \r\n # Round the array of frequencies to the nearest 0.01 to reduce the\r\n # number of distinct frequencies we have to deal with. \r\n \r\n non_zero_elems_in_freq = freq_1d[0][ind]; \r\n non_zero_elems_in_freq = np.double(np.round((non_zero_elems_in_freq*100)))/100;\r\n \r\n unfreq = np.unique(non_zero_elems_in_freq);\r\n\r\n # Generate filters corresponding to these distinct frequencies and\r\n # orientations in 'angleInc' increments.\r\n \r\n sigmax = 1/unfreq[0]*kx;\r\n sigmay = 1/unfreq[0]*ky;\r\n \r\n sze = np.round(3*np.max([sigmax,sigmay]));\r\n \r\n x,y = np.meshgrid(np.linspace(-sze,sze,(2*sze + 1)),np.linspace(-sze,sze,(2*sze + 1)));\r\n \r\n reffilter = np.exp(-(( (np.power(x,2))/(sigmax*sigmax) + (np.power(y,2))/(sigmay*sigmay)))) * np.cos(2*np.pi*unfreq[0]*x); # this is the original gabor filter\r\n \r\n filt_rows, filt_cols = reffilter.shape; \r\n \r\n gabor_filter = np.array(np.zeros(((int)(180/angleInc),filt_rows,filt_cols)));\r\n \r\n for o in range(0,(int)(180/angleInc)):\r\n \r\n # Generate rotated versions of the filter. Note orientation\r\n # image provides orientation *along* the ridges, hence +90\r\n # degrees, and imrotate requires angles +ve anticlockwise, hence\r\n # the minus sign. 
\r\n \r\n rot_filt = scipy.ndimage.rotate(reffilter,-(o*angleInc + 90),reshape = False);\r\n gabor_filter[o] = rot_filt;\r\n \r\n # Find indices of matrix points greater than maxsze from the image\r\n # boundary\r\n \r\n maxsze = int(sze); \r\n\r\n temp = freq>0; \r\n validr,validc = np.where(temp) \r\n \r\n temp1 = validr>maxsze;\r\n temp2 = validr<rows - maxsze;\r\n temp3 = validc>maxsze;\r\n temp4 = validc<cols - maxsze;\r\n \r\n final_temp = temp1 & temp2 & temp3 & temp4; \r\n \r\n finalind = np.where(final_temp);\r\n \r\n # Convert orientation matrix values from radians to an index value\r\n # that corresponds to round(degrees/angleInc) \r\n \r\n maxorientindex = np.round(180/angleInc);\r\n orientindex = np.round(orient/np.pi*180/angleInc);\r\n \r\n #do the filtering \r\n \r\n for i in range(0,rows):\r\n for j in range(0,cols):\r\n if(orientindex[i][j] < 1):\r\n orientindex[i][j] = orientindex[i][j] + maxorientindex;\r\n if(orientindex[i][j] > maxorientindex):\r\n orientindex[i][j] = orientindex[i][j] - maxorientindex;\r\n finalind_rows,finalind_cols = np.shape(finalind);\r\n sze = int(sze);\r\n for k in range(0,finalind_cols):\r\n r = validr[finalind[0][k]];\r\n c = validc[finalind[0][k]];\r\n \r\n img_block = im[r-sze:r+sze + 1][:,c-sze:c+sze + 1];\r\n \r\n newim[r][c] = np.sum(img_block * gabor_filter[int(orientindex[r][c]) - 1]);\r\n \r\n return(newim); " ]
[ [ "numpy.linspace", "numpy.unique", "numpy.reshape", "numpy.power", "numpy.cos", "scipy.ndimage.rotate", "numpy.round", "numpy.max", "numpy.shape", "numpy.array", "numpy.where", "numpy.double", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
vishalbelsare/skpro
[ "05f0df076db777946f317117d6cc66f2dd54259b", "05f0df076db777946f317117d6cc66f2dd54259b" ]
[ "examples/workflow.py", "skpro/ensemble.py" ]
[ "from sklearn.linear_model import LinearRegression\n\nfrom skpro.workflow.table import Table\nfrom skpro.metrics import log_loss\nfrom skpro.workflow import Model\nfrom skpro.workflow.manager import DataManager\nfrom skpro.workflow.cross_validation import grid_optimizer\nfrom skpro.parametric import ParametricEstimator\nfrom skpro.parametric.estimators import Constant\nfrom skpro.baselines import DensityBaseline\n\ntbl = Table()\n\n# Loads and represents the data\ndata = DataManager('boston')\n\n# Adds a model information column\ntbl.info()\n# Defines the cross validation using the log_loss metric and grid hyperparameter search\ntbl.cv(data, log_loss, tune=True, optimizer=grid_optimizer(n_jobs=-1, verbose=0))\n\n# Run the models against the workflow and print the results\ntbl.print([\n # Baseline ...\n Model(\n DensityBaseline()\n ),\n # ... and parametric composite model\n Model(\n ParametricEstimator(\n LinearRegression(),\n Constant('std(y)')\n ),\n # ... which hyperparameter shall be optimized\n tuning={'point__normalize': [True, False]},\n )\n])", "import numpy as np\n\nfrom sklearn.ensemble import BaggingRegressor as BaseBaggingRegressor\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils import check_array\n\nfrom .base import ProbabilisticEstimator\n\n\nclass BaggingRegressor(BaseBaggingRegressor, ProbabilisticEstimator):\n\n class Distribution(ProbabilisticEstimator.Distribution):\n\n def __init__(self, estimator, X, distributions, n_estimators):\n super().__init__(estimator, X)\n self.distributions = distributions\n self.n_estimators = n_estimators\n\n def point(self):\n return NotImplemented\n\n def std(self):\n return NotImplemented\n\n def pdf(self, x):\n # Average the predicted PDFs\n arr = np.array([\n d.pdf(x)\n for distribution in self.distributions\n for d in distribution\n ])\n\n return np.mean(arr, axis=0)\n\n def predict(self, X):\n \"\"\" Predict regression target for X.\n\n The predicted regression target of an input sample is computed as the\n averaged predicted distributions of the estimators in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrices are accepted only if\n they are supported by the base estimator.\n\n Returns\n -------\n y : skpro.base.Distribution = [n_samples]\n The predicted bagged distributions.\n \"\"\"\n\n # Ensure estimator were being fitted\n check_is_fitted(self, \"estimators_features_\")\n # Check data\n X = check_array(X, accept_sparse=['csr', 'csc'])\n\n # Parallel loop\n from sklearn.ensemble.base import _partition_estimators\n n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,\n self.n_jobs)\n\n def _parallel_predict_regression(estimators, estimators_features, X):\n \"\"\" Private function used to compute predictions within a job. \"\"\"\n return [\n estimator.predict(X[:, features])\n for estimator, features in zip(estimators, estimators_features)\n ]\n\n # Obtain predictions\n all_y_hat = [\n _parallel_predict_regression(\n self.estimators_[starts[i]:starts[i + 1]],\n self.estimators_features_[starts[i]:starts[i + 1]],\n X\n ) for i in range(n_jobs)\n ]\n\n # Reduce\n return self._distribution()(self, X, all_y_hat, n_estimators)\n\n def __str__(self, describer=str):\n return 'BaggingRegressor(' + describer(self.base_estimator) + ')'\n\n def __repr__(self):\n return self.__str__(repr)\n" ]
[ [ "sklearn.linear_model.LinearRegression" ], [ "sklearn.utils.check_array", "sklearn.utils.validation.check_is_fitted", "numpy.mean", "sklearn.ensemble.base._partition_estimators" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YasMME/habitat-lab
[ "d573e16d6e3d56e6fef4dcde142fa93c63f8469b" ]
[ "habitat/sims/habitat_simulator/habitat_simulator.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Set,\n Union,\n cast,\n)\n\nimport numpy as np\nfrom gym import spaces\nfrom gym.spaces.box import Box\nfrom numpy import ndarray\n\nif TYPE_CHECKING:\n from torch import Tensor\n\nimport habitat_sim\nfrom habitat.core.dataset import Episode\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import (\n AgentState,\n Config,\n DepthSensor,\n Observations,\n RGBSensor,\n SemanticSensor,\n Sensor,\n SensorSuite,\n ShortestPathPoint,\n Simulator,\n VisualObservation,\n)\nfrom habitat.core.spaces import Space\n\nRGBSENSOR_DIMENSION = 3\n\n\ndef overwrite_config(\n config_from: Config, config_to: Any, ignore_keys: Optional[Set[str]] = None\n) -> None:\n r\"\"\"Takes Habitat Lab config and Habitat-Sim config structures. Overwrites\n Habitat-Sim config with Habitat Lab values, where a field name is present\n in lowercase. Mostly used to avoid :ref:`sim_cfg.field = hapi_cfg.FIELD`\n code.\n Args:\n config_from: Habitat Lab config node.\n config_to: Habitat-Sim config structure.\n ignore_keys: Optional set of keys to ignore in config_to\n \"\"\"\n\n def if_config_to_lower(config):\n if isinstance(config, Config):\n return {key.lower(): val for key, val in config.items()}\n else:\n return config\n\n for attr, value in config_from.items():\n low_attr = attr.lower()\n if ignore_keys is None or low_attr not in ignore_keys:\n if hasattr(config_to, low_attr):\n setattr(config_to, low_attr, if_config_to_lower(value))\n else:\n raise NameError(\n f\"\"\"{low_attr} is not found on habitat_sim but is found on habitat_lab config.\n It's also not in the list of keys to ignore: {ignore_keys}\n Did you make a typo in the config?\n If not the version of Habitat Sim may not be compatible with Habitat Lab version: {config_from}\n \"\"\"\n )\n\n\[email protected]_sensor\nclass HabitatSimRGBSensor(RGBSensor):\n sim_sensor_type: habitat_sim.SensorType\n sim_sensor_subtype: habitat_sim.SensorSubType\n\n def __init__(self, config: Config) -> None:\n self.sim_sensor_type = habitat_sim.SensorType.COLOR\n self.sim_sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n super().__init__(config=config)\n\n def _get_observation_space(self, *args: Any, **kwargs: Any) -> Box:\n return spaces.Box(\n low=0,\n high=255,\n shape=(self.config.HEIGHT, self.config.WIDTH, RGBSENSOR_DIMENSION),\n dtype=np.uint8,\n )\n\n def get_observation(\n self, sim_obs: Dict[str, Union[ndarray, bool, \"Tensor\"]]\n ) -> VisualObservation:\n obs = cast(Optional[VisualObservation], sim_obs.get(self.uuid, None))\n check_sim_obs(obs, self)\n\n # remove alpha channel\n obs = obs[:, :, :RGBSENSOR_DIMENSION] # type: ignore[index]\n return obs\n\n\[email protected]_sensor\nclass HabitatSimDepthSensor(DepthSensor):\n sim_sensor_type: habitat_sim.SensorType\n sim_sensor_subtype: habitat_sim.SensorSubType\n min_depth_value: float\n max_depth_value: float\n\n def __init__(self, config: Config) -> None:\n self.sim_sensor_type = habitat_sim.SensorType.DEPTH\n self.sim_sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n\n if config.NORMALIZE_DEPTH:\n self.min_depth_value = 0\n self.max_depth_value = 1\n else:\n self.min_depth_value = config.MIN_DEPTH\n self.max_depth_value = config.MAX_DEPTH\n\n super().__init__(config=config)\n\n def 
_get_observation_space(self, *args: Any, **kwargs: Any) -> Box:\n return spaces.Box(\n low=self.min_depth_value,\n high=self.max_depth_value,\n shape=(self.config.HEIGHT, self.config.WIDTH, 1),\n dtype=np.float32,\n )\n\n def get_observation(\n self, sim_obs: Dict[str, Union[ndarray, bool, \"Tensor\"]]\n ) -> VisualObservation:\n obs = cast(Optional[VisualObservation], sim_obs.get(self.uuid, None))\n check_sim_obs(obs, self)\n if isinstance(obs, np.ndarray):\n obs = np.clip(obs, self.config.MIN_DEPTH, self.config.MAX_DEPTH)\n\n obs = np.expand_dims(\n obs, axis=2\n ) # make depth observation a 3D array\n else:\n obs = obs.clamp(self.config.MIN_DEPTH, self.config.MAX_DEPTH) # type: ignore[attr-defined]\n\n obs = obs.unsqueeze(-1) # type: ignore[attr-defined]\n\n if self.config.NORMALIZE_DEPTH:\n # normalize depth observation to [0, 1]\n obs = (obs - self.config.MIN_DEPTH) / (\n self.config.MAX_DEPTH - self.config.MIN_DEPTH\n )\n\n return obs\n\n\[email protected]_sensor\nclass HabitatSimSemanticSensor(SemanticSensor):\n sim_sensor_type: habitat_sim.SensorType\n sim_sensor_subtype: habitat_sim.SensorSubType\n\n def __init__(self, config):\n self.sim_sensor_type = habitat_sim.SensorType.SEMANTIC\n self.sim_sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n super().__init__(config=config)\n\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n return spaces.Box(\n low=np.iinfo(np.uint32).min,\n high=np.iinfo(np.uint32).max,\n shape=(self.config.HEIGHT, self.config.WIDTH),\n dtype=np.uint32,\n )\n\n def get_observation(\n self, sim_obs: Dict[str, Union[ndarray, bool, \"Tensor\"]]\n ) -> VisualObservation:\n obs = cast(Optional[VisualObservation], sim_obs.get(self.uuid, None))\n check_sim_obs(obs, self)\n return obs\n\n\ndef check_sim_obs(obs: ndarray, sensor: Sensor) -> None:\n assert obs is not None, (\n \"Observation corresponding to {} not present in \"\n \"simulator's observations\".format(sensor.uuid)\n )\n\n\nHabitatSimVizSensors = Union[\n HabitatSimRGBSensor, HabitatSimDepthSensor, HabitatSimSemanticSensor\n]\n\n\[email protected]_simulator(name=\"Sim-v0\")\nclass HabitatSim(habitat_sim.Simulator, Simulator):\n r\"\"\"Simulator wrapper over habitat-sim\n\n habitat-sim repo: https://github.com/facebookresearch/habitat-sim\n\n Args:\n config: configuration for initializing the simulator.\n \"\"\"\n\n def __init__(self, config: Config) -> None:\n self.habitat_config = config\n agent_config = self._get_agent_config()\n\n sim_sensors = []\n for sensor_name in agent_config.SENSORS:\n sensor_cfg = getattr(self.habitat_config, sensor_name)\n sensor_type = registry.get_sensor(sensor_cfg.TYPE)\n\n assert sensor_type is not None, \"invalid sensor type {}\".format(\n sensor_cfg.TYPE\n )\n sim_sensors.append(sensor_type(sensor_cfg))\n\n self._sensor_suite = SensorSuite(sim_sensors)\n self.sim_config = self.create_sim_config(self._sensor_suite)\n self._current_scene = self.sim_config.sim_cfg.scene_id\n super().__init__(self.sim_config)\n self._action_space = spaces.Discrete(\n len(self.sim_config.agents[0].action_space)\n )\n self._prev_sim_obs: Optional[Observations] = None\n\n def create_sim_config(\n self, _sensor_suite: SensorSuite\n ) -> habitat_sim.Configuration:\n sim_config = habitat_sim.SimulatorConfiguration()\n # Check if Habitat-Sim is post Scene Config Update\n if not hasattr(sim_config, \"scene_id\"):\n raise RuntimeError(\n \"Incompatible version of Habitat-Sim detected, please upgrade habitat_sim\"\n )\n overwrite_config(\n 
config_from=self.habitat_config.HABITAT_SIM_V0,\n config_to=sim_config,\n # Ignore key as it gets propogated to sensor below\n ignore_keys={\"gpu_gpu\"},\n )\n sim_config.scene_id = self.habitat_config.SCENE\n agent_config = habitat_sim.AgentConfiguration()\n overwrite_config(\n config_from=self._get_agent_config(),\n config_to=agent_config,\n # These keys are only used by Hab-Lab\n ignore_keys={\n \"is_set_start_state\",\n # This is the Sensor Config. Unpacked below\n \"sensors\",\n \"start_position\",\n \"start_rotation\",\n },\n )\n\n sensor_specifications = []\n VisualSensorTypeSet = {\n habitat_sim.SensorType.COLOR,\n habitat_sim.SensorType.DEPTH,\n habitat_sim.SensorType.SEMANTIC,\n }\n CameraSensorSubTypeSet = {\n habitat_sim.SensorSubType.PINHOLE,\n habitat_sim.SensorSubType.ORTHOGRAPHIC,\n }\n for sensor in _sensor_suite.sensors.values():\n\n # Check if type VisualSensorSpec, we know that Sensor is one of HabitatSimRGBSensor, HabitatSimDepthSensor, HabitatSimSemanticSensor\n if (\n getattr(sensor, \"sim_sensor_type\", [])\n not in VisualSensorTypeSet\n ):\n raise ValueError(\n f\"\"\"{getattr(sensor, \"sim_sensor_type\", [])} is an illegal sensorType that is not implemented yet\"\"\"\n )\n # Check if type CameraSensorSpec\n if (\n getattr(sensor, \"sim_sensor_subtype\", [])\n not in CameraSensorSubTypeSet\n ):\n raise ValueError(\n f\"\"\"{getattr(sensor, \"sim_sensor_subtype\", [])} is an illegal sensorSubType for a VisualSensor\"\"\"\n )\n\n # TODO: Implement checks for other types of SensorSpecs\n\n sim_sensor_cfg = habitat_sim.CameraSensorSpec()\n # TODO Handle configs for custom VisualSensors that might need\n # their own ignore_keys. Maybe with special key / checking\n # SensorType\n overwrite_config(\n config_from=sensor.config,\n config_to=sim_sensor_cfg,\n # These keys are only used by Hab-Lab\n # or translated into the sensor config manually\n ignore_keys={\n \"height\",\n \"hfov\",\n \"max_depth\",\n \"min_depth\",\n \"normalize_depth\",\n \"type\",\n \"width\",\n },\n )\n sim_sensor_cfg.uuid = sensor.uuid\n sim_sensor_cfg.resolution = list(\n sensor.observation_space.shape[:2]\n )\n\n # TODO(maksymets): Add configure method to Sensor API to avoid\n # accessing child attributes through parent interface\n # We know that the Sensor has to be one of these Sensors\n sensor = cast(HabitatSimVizSensors, sensor)\n sim_sensor_cfg.sensor_type = sensor.sim_sensor_type\n sim_sensor_cfg.sensor_subtype = sensor.sim_sensor_subtype\n sim_sensor_cfg.gpu2gpu_transfer = (\n self.habitat_config.HABITAT_SIM_V0.GPU_GPU\n )\n sensor_specifications.append(sim_sensor_cfg)\n\n agent_config.sensor_specifications = sensor_specifications\n agent_config.action_space = registry.get_action_space_configuration(\n self.habitat_config.ACTION_SPACE_CONFIG\n )(self.habitat_config).get()\n\n return habitat_sim.Configuration(sim_config, [agent_config])\n\n @property\n def sensor_suite(self) -> SensorSuite:\n return self._sensor_suite\n\n @property\n def action_space(self) -> Space:\n return self._action_space\n\n def _update_agents_state(self) -> bool:\n is_updated = False\n for agent_id, _ in enumerate(self.habitat_config.AGENTS):\n agent_cfg = self._get_agent_config(agent_id)\n if agent_cfg.IS_SET_START_STATE:\n self.set_agent_state(\n agent_cfg.START_POSITION,\n agent_cfg.START_ROTATION,\n agent_id,\n )\n is_updated = True\n\n return is_updated\n\n def reset(self) -> Observations:\n sim_obs = super().reset()\n if self._update_agents_state():\n sim_obs = self.get_sensor_observations()\n\n 
self._prev_sim_obs = sim_obs\n return self._sensor_suite.get_observations(sim_obs)\n\n def step(self, action: Union[str, int]) -> Observations:\n sim_obs = super().step(action)\n self._prev_sim_obs = sim_obs\n observations = self._sensor_suite.get_observations(sim_obs)\n return observations\n\n def render(self, mode: str = \"rgb\") -> Any:\n r\"\"\"\n Args:\n mode: sensor whose observation is used for returning the frame,\n eg: \"rgb\", \"depth\", \"semantic\"\n\n Returns:\n rendered frame according to the mode\n \"\"\"\n sim_obs = self.get_sensor_observations()\n observations = self._sensor_suite.get_observations(sim_obs)\n\n output = observations.get(mode)\n assert output is not None, \"mode {} sensor is not active\".format(mode)\n if not isinstance(output, np.ndarray):\n # If it is not a numpy array, it is a torch tensor\n # The function expects the result to be a numpy array\n output = output.to(\"cpu\").numpy()\n\n return output\n\n def reconfigure(self, habitat_config: Config) -> None:\n # TODO(maksymets): Switch to Habitat-Sim more efficient caching\n is_same_scene = habitat_config.SCENE == self._current_scene\n self.habitat_config = habitat_config\n self.sim_config = self.create_sim_config(self._sensor_suite)\n if not is_same_scene:\n self._current_scene = habitat_config.SCENE\n self.close()\n super().reconfigure(self.sim_config)\n\n self._update_agents_state()\n\n def geodesic_distance(\n self,\n position_a: Union[Sequence[float], ndarray],\n position_b: Union[Sequence[float], Sequence[Sequence[float]]],\n episode: Optional[Episode] = None,\n ) -> float:\n if episode is None or episode._shortest_path_cache is None:\n path = habitat_sim.MultiGoalShortestPath()\n if isinstance(position_b[0], (Sequence, np.ndarray)):\n path.requested_ends = np.array(position_b, dtype=np.float32)\n else:\n path.requested_ends = np.array(\n [np.array(position_b, dtype=np.float32)]\n )\n else:\n path = episode._shortest_path_cache\n\n path.requested_start = np.array(position_a, dtype=np.float32)\n\n self.pathfinder.find_path(path)\n\n if episode is not None:\n episode._shortest_path_cache = path\n\n return path.geodesic_distance\n\n def action_space_shortest_path(\n self,\n source: AgentState,\n targets: Sequence[AgentState],\n agent_id: int = 0,\n ) -> List[ShortestPathPoint]:\n r\"\"\"\n Returns:\n List of agent states and actions along the shortest path from\n source to the nearest target (both included). If one of the\n target(s) is identical to the source, a list containing only\n one node with the identical agent state is returned. Returns\n an empty list in case none of the targets are reachable from\n the source. For the last item in the returned list the action\n will be None.\n \"\"\"\n raise NotImplementedError(\n \"This function is no longer implemented. 
Please use the greedy \"\n \"follower instead\"\n )\n\n @property\n def up_vector(self) -> np.ndarray:\n return np.array([0.0, 1.0, 0.0])\n\n @property\n def forward_vector(self) -> np.ndarray:\n return -np.array([0.0, 0.0, 1.0])\n\n def get_straight_shortest_path_points(self, position_a, position_b):\n path = habitat_sim.ShortestPath()\n path.requested_start = position_a\n path.requested_end = position_b\n self.pathfinder.find_path(path)\n return path.points\n\n def sample_navigable_point(self) -> List[float]:\n return self.pathfinder.get_random_navigable_point().tolist()\n\n def is_navigable(self, point: List[float]) -> bool:\n return self.pathfinder.is_navigable(point)\n\n def semantic_annotations(self):\n r\"\"\"\n Returns:\n SemanticScene which is a three level hierarchy of semantic\n annotations for the current scene. Specifically this method\n returns a SemanticScene which contains a list of SemanticLevel's\n where each SemanticLevel contains a list of SemanticRegion's where\n each SemanticRegion contains a list of SemanticObject's.\n\n SemanticScene has attributes: aabb(axis-aligned bounding box) which\n has attributes aabb.center and aabb.sizes which are 3d vectors,\n categories, levels, objects, regions.\n\n SemanticLevel has attributes: id, aabb, objects and regions.\n\n SemanticRegion has attributes: id, level, aabb, category (to get\n name of category use category.name()) and objects.\n\n SemanticObject has attributes: id, region, aabb, obb (oriented\n bounding box) and category.\n\n SemanticScene contains List[SemanticLevels]\n SemanticLevel contains List[SemanticRegion]\n SemanticRegion contains List[SemanticObject]\n\n Example to loop through in a hierarchical fashion:\n for level in semantic_scene.levels:\n for region in level.regions:\n for obj in region.objects:\n \"\"\"\n return self.semantic_scene\n\n def _get_agent_config(self, agent_id: Optional[int] = None) -> Any:\n if agent_id is None:\n agent_id = self.habitat_config.DEFAULT_AGENT_ID\n agent_name = self.habitat_config.AGENTS[agent_id]\n agent_config = getattr(self.habitat_config, agent_name)\n return agent_config\n\n def get_agent_state(self, agent_id: int = 0) -> habitat_sim.AgentState:\n assert agent_id == 0, \"No support of multi agent in {} yet.\".format(\n self.__class__.__name__\n )\n return self.get_agent(agent_id).get_state()\n\n def set_agent_state(\n self,\n position: List[float],\n rotation: List[float],\n agent_id: int = 0,\n reset_sensors: bool = True,\n ) -> bool:\n r\"\"\"Sets agent state similar to initialize_agent, but without agents\n creation. On failure to place the agent in the proper position, it is\n moved back to its previous pose.\n\n Args:\n position: list containing 3 entries for (x, y, z).\n rotation: list with 4 entries for (x, y, z, w) elements of unit\n quaternion (versor) representing agent 3D orientation,\n (https://en.wikipedia.org/wiki/Versor)\n agent_id: int identification of agent from multiagent setup.\n reset_sensors: bool for if sensor changes (e.g. tilt) should be\n reset).\n\n Returns:\n True if the set was successful else moves the agent back to its\n original pose and returns false.\n \"\"\"\n agent = self.get_agent(agent_id)\n new_state = self.get_agent_state(agent_id)\n new_state.position = position\n new_state.rotation = rotation\n\n # NB: The agent state also contains the sensor states in _absolute_\n # coordinates. In order to set the agent's body to a specific\n # location and have the sensors follow, we must not provide any\n # state for the sensors. 
This will cause them to follow the agent's\n # body\n new_state.sensor_states = {}\n agent.set_state(new_state, reset_sensors)\n return True\n\n def get_observations_at(\n self,\n position: Optional[List[float]] = None,\n rotation: Optional[List[float]] = None,\n keep_agent_at_new_pose: bool = False,\n ) -> Optional[Observations]:\n current_state = self.get_agent_state()\n if position is None or rotation is None:\n success = True\n else:\n success = self.set_agent_state(\n position, rotation, reset_sensors=False\n )\n\n if success:\n sim_obs = self.get_sensor_observations()\n\n self._prev_sim_obs = sim_obs\n\n observations = self._sensor_suite.get_observations(sim_obs)\n if not keep_agent_at_new_pose:\n self.set_agent_state(\n current_state.position,\n current_state.rotation,\n reset_sensors=False,\n )\n return observations\n else:\n return None\n\n def distance_to_closest_obstacle(\n self, position: ndarray, max_search_radius: float = 2.0\n ) -> float:\n return self.pathfinder.distance_to_closest_obstacle(\n position, max_search_radius\n )\n\n def island_radius(self, position: Sequence[float]) -> float:\n return self.pathfinder.island_radius(position)\n\n @property\n def previous_step_collided(self):\n r\"\"\"Whether or not the previous step resulted in a collision\n\n Returns:\n bool: True if the previous step resulted in a collision, false otherwise\n\n Warning:\n This feild is only updated when :meth:`step`, :meth:`reset`, or :meth:`get_observations_at` are\n called. It does not update when the agent is moved to a new loction. Furthermore, it\n will _always_ be false after :meth:`reset` or :meth:`get_observations_at` as neither of those\n result in an action (step) being taken.\n \"\"\"\n return self._prev_sim_obs.get(\"collided\", False)\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.iinfo", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shijiansu/coursera-applied-data-science-with-python
[ "a0f2bbd0b9201805f26d18b73a25183cf0b3a0e9", "a0f2bbd0b9201805f26d18b73a25183cf0b3a0e9" ]
[ "2_applied_data_representation/w3_charting_fundamentals/1_chart/4_heatmap.py", "1_introduction/w2_pandas/3_assignment_pandas/1_olympic_game.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 数据\nY = np.random.normal(loc=0.0, scale=1.0, size=10000)\nX = np.random.random(size=10000)\n\n# 例子 1:\nplt.figure()\n_ = plt.hist2d(X, Y, bins=25)\nplt.show()\n\n# 例子 2:\nplt.figure()\n_ = plt.hist2d(X, Y, bins=100)\n# add a colorbar legend\nplt.colorbar()\nplt.show()\n", "# -*- coding: UTF-8 -*-\nfrom __future__ import division\nimport pandas as pd\n\n# The following code loads the olympics dataset (olympics.csv), which was derrived from the Wikipedia entry\n# on All Time Olympic Games Medals, and does some basic data cleaning.\n# The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals,\n# total # number of games, total # of medals. Use this dataset to answer the questions below.\ndf = pd.read_csv('../data/olympics.csv', index_col=0, skiprows=1)\nfor col in df.columns:\n if col[:2] == '01':\n df.rename(columns={col: 'Gold' + col[4:]}, inplace=True)\n if col[:2] == '02':\n df.rename(columns={col: 'Silver' + col[4:]}, inplace=True)\n if col[:2] == '03':\n df.rename(columns={col: 'Bronze' + col[4:]}, inplace=True)\n if col[:1] == '№': # 虽然我在本地修改了数据文件, 但是Coursera服务器上仍是这个, 所以保留\n df.rename(columns={col: '#' + col[1:]}, inplace=True)\n\nnames_ids = df.index.str.split('(') # split the index by '('\n\ndf.index = names_ids.str[0] # the [0] element is the country name (new index)\ndf['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)\n\ndf = df.drop('Totals')\nprint(df.head(1))\n# # Summer Gold Silver Bronze Total # Winter Gold.1 \\\n# Afghanistan  13 0 0 2 2 0 0\n# Silver.1 Bronze.1 Total.1 # Games Gold.2 Silver.2 \\\n# Afghanistan  0 0 0 13 0 0\n# Bronze.2 Combined total ID\n# Afghanistan  2 2 AFG\n\n\n# Question 0 (Example)\n# ----------------------------------------\n# Quiz Question: What is the first country in df?\n# This function should return a Series.\ndef answer_zero():\n return df.iloc[0]\n\n\nprint('\\nQuestion 0')\nprint(answer_zero())\n\n\n# # Summer 13\n# Gold 0\n# Silver 0\n# Bronze 2\n# Total 2\n# # Winter 0\n# Gold.1 0\n# Silver.1 0\n# Bronze.1 0\n# Total.1 0\n# # Games 13\n# Gold.2 0\n# Silver.2 0\n# Bronze.2 2\n# Combined total 2\n# ID AFG\n# Name: Afghanistan, dtype: object\n\n\n# Question 1\n# ----------------------------------------\n# Quiz Question: Which country has won the most gold medals in summer games?\n# This function should return a single string value.\ndef answer_one1():\n # Here the index has the country name\n # Only one returns\n return df[df['Gold'] == df['Gold'].max()].index.index[0]\n\n\ndef answer_one2():\n # Here the index has the country name\n return df.sort_index(axis=0, by='Gold', ascending=False).index[0]\n\n\ndef answer_one():\n # Directly return index name\n return df['Gold'].argmax()\n\n\nprint('\\nQuestion 1')\nprint(answer_one()) # United States\n\n\n# Question 2\n# ----------------------------------------\n# Quiz Question: Which country had the biggest difference between their summer and winter gold medal counts?\n# This function should return a single string value.\ndef answer_two2():\n biggest_diff = abs(df['Gold'] - df['Gold.1']).max()\n # Here the index has the country name\n return df[abs(df['Gold'] - df['Gold.1']) == biggest_diff].index[0]\n\n\ndef answer_two():\n df['Gold_Diff'] = abs(df['Gold'] - df['Gold.1'])\n return df['Gold_Diff'].argmax()\n\n\nprint('\\nQuestion 2')\nprint(answer_two()) # United States\n\n\n# Question 3\n# 
----------------------------------------\n# Quiz Question: Which country has the biggest difference between their summer gold medal\n# counts and winter gold medal counts relative to their total gold medal count?\n# Summer Gold−Winter GoldTotal Gold\n# Only include countries that have won at least 1 gold in both summer and winter.\n# This function should return a single string value.\ndef answer_three():\n df2 = pd.DataFrame()\n df2['Gold'] = df['Gold']\n df2['Gold.1'] = df['Gold.1']\n df2 = df2[(df2['Gold'] > 0) & (df2['Gold.1'] > 0)]\n df2['Gold_diff_r'] = (df['Gold'] - df['Gold.1']) / df['Gold.2']\n return df2['Gold_diff_r'].argmax()\n\n\nprint('\\nQuestion 3')\nprint(answer_three()) # Bulgaria\n\n\n# Question 4\n# ----------------------------------------\n# Quiz Question: Write a function to update the dataframe to include a new column called \"Points\"\n# which is a weighted value where each gold medal counts for 3 points, silver medals for 2 points,\n# and bronze mdeals for 1 point. The function should return only the column (a Series object) which you created.\n# This function should return a Series named Points of length 146\ndef answer_four():\n point = df['Gold.2'] * 3 + df['Silver.2'] * 2 + df['Bronze.2']\n return point\n\n\nprint('\\nQuestion 4')\nprint(len(answer_four())) # 146\n" ]
[ [ "numpy.random.random", "matplotlib.pyplot.hist2d", "matplotlib.pyplot.colorbar", "numpy.random.normal", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
hcherkaoui/pyta
[ "1f4a272abf4a6971e75c552eda791409608d5fc5" ]
[ "pyta/optim.py" ]
[ "\"\"\" This module gathers optimization functions.\"\"\"\n# Authors: Hamza Cherkaoui <[email protected]>\n# License: BSD (3-clause)\n\nimport time\nimport warnings\nimport numpy as np\nfrom scipy.optimize.linesearch import line_search_armijo\n\n\ndef fista(grad, obj, prox, x0, momentum=True, max_iter=100, step_size=None,\n early_stopping=True, eps=np.finfo(np.float32).eps, times=False,\n debug=False, verbose=0, name=\"Optimization\"):\n \"\"\" F/ISTA algorithm. \"\"\"\n if verbose and not debug:\n warnings.warn(\"Can't have verbose if cost-func is not computed, \"\n \"enable it by setting debug=True\")\n\n adaptive_step_size = False\n if step_size is None:\n adaptive_step_size = True\n step_size = 1.0\n\n # prepare the iterate\n t = t_old = 1\n z_old = np.zeros_like(x0)\n x = np.copy(x0)\n\n if adaptive_step_size and x.ndim > 1:\n raise ValueError(\"Backtracking line search need to have 1D gradient\")\n\n # saving variables\n pobj_, times_ = [obj(x)], [0.0]\n\n # precompute L.op(y)\n if adaptive_step_size:\n old_fval = obj(x)\n\n # main loop\n for ii in range(max_iter):\n\n if times:\n t0 = time.time()\n\n grad_ = grad(x)\n\n # step-size\n if adaptive_step_size:\n step_size, _, old_fval = line_search_armijo(\n obj, x.ravel(), -grad_.ravel(), grad_.ravel(),\n old_fval, c1=1.0e-5, alpha0=step_size)\n if step_size is None:\n step_size = 0.0\n\n # descent step\n z = prox(x - step_size * grad_, step_size)\n\n # fista acceleration\n if momentum:\n t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_old**2))\n x = z + (t_old - 1.0) / t * (z - z_old)\n else:\n x = z\n\n # savings\n if debug:\n if adaptive_step_size:\n pobj_.append(old_fval)\n else:\n pobj_.append(obj(x))\n\n # printing\n if debug and verbose > 0:\n print(\"[{0}] Iteration {1} / {2}, \"\n \"loss = {3}\".format(name, ii+1, max_iter, pobj_[ii]))\n\n # early-stopping\n l1_diff = np.sum(np.abs(z - z_old))\n if l1_diff <= eps and early_stopping:\n if debug:\n print(\"---> [{0}] early-stopping \"\n \"done at {1}/{2}\".format(name, ii+1, max_iter))\n break\n if l1_diff > np.finfo(np.float64).max:\n raise RuntimeError(\"[{}] {} have diverged during.\".format(name,\n [\"ISTA\", \"FISTA\"][momentum]))\n\n # update iterates\n t_old = t\n z_old = z\n\n # savings\n if times:\n times_.append(time.time() - t0)\n\n if not times and not debug:\n return x\n if times and not debug:\n return x, times_\n if not times and debug:\n return x, pobj_\n if times and debug:\n return x, pobj_, times_\n\n\ndef fbs(y, prox_t, prox_s, update_weights=[0.5, 0.5], max_iter=10,\n name='Optimization', obj=lambda x: -1, verbose=False):\n \"\"\" Forward Backward Splitting algorithm. \"\"\"\n x_s = np.zeros_like(y)\n x_t = np.zeros_like(y)\n x = np.zeros_like(y)\n w_t, w_s = update_weights\n\n if verbose:\n print(f\"[{name}] progress {0}%\", end=\"\\r\")\n\n l_time, l_loss = [0.0], [obj(x)]\n for ii in range(max_iter):\n\n t0 = time.time()\n\n if w_t > 0:\n x_t = x_t + prox_t(x - x_t + y) - x\n\n if w_s > 0:\n x_s = x_s + prox_s(x - x_s + y) - x\n\n x = w_t * x_t + w_s * x_s\n\n l_time.append(time.time() - t0)\n l_loss.append(obj(x))\n\n if verbose > 0:\n print(f\"[{name}] progress {100. * (ii + 1) / max_iter:.1f}%\"\n f\" - loss={l_loss[-1]:.4e}\", end=\"\\r\")\n\n if verbose > 0:\n print()\n\n return x, l_time, l_loss\n" ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.finfo", "numpy.copy", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
springcoil/pymc3
[ "90fcd7d1073da8724830de703828c79fc843c493", "90fcd7d1073da8724830de703828c79fc843c493", "90fcd7d1073da8724830de703828c79fc843c493" ]
[ "pymc3/gp/util.py", "pymc3/tests/test_examples.py", "pymc3/gp/cov.py" ]
[ "from scipy.cluster.vq import kmeans\nimport numpy as np\nimport pymc3 as pm\nimport theano.tensor as tt\n\n\ncholesky = pm.distributions.dist_math.Cholesky(nofail=True, lower=True)\nsolve_lower = tt.slinalg.Solve(A_structure='lower_triangular')\nsolve_upper = tt.slinalg.Solve(A_structure='upper_triangular')\nsolve = tt.slinalg.Solve(A_structure='general')\n\n\ndef infer_shape(X, n_points=None):\n if n_points is None:\n try:\n n_points = np.int(X.shape[0])\n except TypeError:\n raise TypeError(\"Cannot infer n_points, provide as an argument\")\n return n_points\n\n\ndef stabilize(K):\n \"\"\" adds small diagonal to a covariance matrix \"\"\"\n return K + 1e-6 * tt.identity_like(K)\n\n\ndef kmeans_inducing_points(n_inducing, X):\n # first whiten X\n if isinstance(X, tt.TensorConstant):\n X = X.value\n elif isinstance(X, (np.ndarray, tuple, list)):\n X = np.asarray(X)\n else:\n raise ValueError((\"To use K-means initialization, \"\n \"please provide X as a type that \"\n \"can be cast to np.ndarray, instead \"\n \"of {}\".format(type(X))))\n scaling = np.std(X, 0)\n # if std of a column is very small (zero), don't normalize that column\n scaling[scaling <= 1e-6] = 1.0\n Xw = X / scaling\n Xu, distortion = kmeans(Xw, n_inducing)\n return Xu * scaling\n\n\ndef conditioned_vars(varnames):\n \"\"\" Decorator for validating attrs that are conditioned on. \"\"\"\n def gp_wrapper(cls):\n def make_getter(name):\n def getter(self):\n value = getattr(self, name, None)\n if value is None:\n raise AttributeError((\"'{}' not set. Provide as argument \"\n \"to condition, or call 'prior' \"\n \"first\".format(name.lstrip(\"_\"))))\n else:\n return value\n return getattr(self, name)\n return getter\n\n def make_setter(name):\n def setter(self, val):\n setattr(self, name, val)\n return setter\n\n for name in varnames:\n getter = make_getter('_' + name)\n setter = make_setter('_' + name)\n setattr(cls, name, property(getter, setter))\n return cls\n return gp_wrapper\n\n\ndef plot_gp_dist(ax, samples, x, plot_samples=True, palette=\"Reds\"):\n \"\"\" A helper function for plotting 1D GP posteriors from trace \"\"\"\n import matplotlib.pyplot as plt\n\n cmap = plt.get_cmap(palette)\n percs = np.linspace(51, 99, 40)\n colors = (percs - np.min(percs)) / (np.max(percs) - np.min(percs))\n samples = samples.T\n x = x.flatten()\n for i, p in enumerate(percs[::-1]):\n upper = np.percentile(samples, p, axis=1)\n lower = np.percentile(samples, 100-p, axis=1)\n color_val = colors[i]\n ax.fill_between(x, upper, lower, color=cmap(color_val), alpha=0.8)\n if plot_samples:\n # plot a few samples\n idx = np.random.randint(0, samples.shape[1], 30)\n ax.plot(x, samples[:,idx], color=cmap(0.9), lw=1, alpha=0.1)\n\n\n\n", "import matplotlib\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport scipy.optimize as opt\nimport theano.tensor as tt\nimport pytest\nimport theano\nfrom pymc3.theanof import floatX\n\nfrom .helpers import SeededTest\n\nmatplotlib.use('Agg', warn=False)\n\n\ndef get_city_data():\n \"\"\"Helper to get city data\"\"\"\n data = pd.read_csv(pm.get_data('srrs2.dat'))\n cty_data = pd.read_csv(pm.get_data('cty.dat'))\n\n data = data[data.state == 'MN']\n\n data['fips'] = data.stfips * 1000 + data.cntyfips\n cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips\n data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))\n data = data.merge(cty_data, 'inner', on='fips')\n\n unique = data[['fips']].drop_duplicates()\n unique['group'] = np.arange(len(unique))\n 
unique.set_index('fips')\n return data.merge(unique, 'inner', on='fips')\n\n\nclass TestARM5_4(SeededTest):\n def build_model(self):\n data = pd.read_csv(pm.get_data('wells.dat'),\n delimiter=u' ', index_col=u'id', dtype={u'switch': np.int8})\n data.dist /= 100\n data.educ /= 4\n col = data.columns\n P = data[col[1:]]\n P -= P.mean()\n P['1'] = 1\n\n with pm.Model() as model:\n effects = pm.Normal('effects', mu=0, tau=100. ** -2, shape=len(P.columns))\n p = tt.nnet.sigmoid(tt.dot(floatX(np.array(P)), effects))\n pm.Bernoulli('s', p, observed=floatX(np.array(data.switch)))\n return model\n\n def test_run(self):\n model = self.build_model()\n with model:\n pm.sample(50, tune=50, n_init=1000)\n\n\nclass TestARM12_6(SeededTest):\n def build_model(self):\n data = get_city_data()\n\n self.obs_means = data.groupby('fips').lradon.mean().as_matrix()\n\n lradon = data.lradon.as_matrix()\n floor = data.floor.as_matrix()\n group = data.group.as_matrix()\n\n with pm.Model() as model:\n groupmean = pm.Normal('groupmean', 0, 10. ** -2.)\n groupsd = pm.Uniform('groupsd', 0, 10.)\n sd = pm.Uniform('sd', 0, 10.)\n floor_m = pm.Normal('floor_m', 0, 5. ** -2.)\n means = pm.Normal('means', groupmean, groupsd ** -2., shape=len(self.obs_means))\n pm.Normal('lr', floor * floor_m + means[group], sd ** -2., observed=lradon)\n return model\n\n def too_slow(self):\n model = self.build_model()\n start = {'groupmean': self.obs_means.mean(),\n 'groupsd_interval__': 0,\n 'sd_interval__': 0,\n 'means': self.obs_means,\n 'floor_m': 0.,\n }\n with model:\n start = pm.find_MAP(start=start,\n vars=[model['groupmean'], model['sd_interval__'], model['floor_m']])\n step = pm.NUTS(model.vars, scaling=start)\n pm.sample(50, step=step, start=start)\n\n\nclass TestARM12_6Uranium(SeededTest):\n def build_model(self):\n data = get_city_data()\n self.obs_means = data.groupby('fips').lradon.mean()\n\n lradon = data.lradon.as_matrix()\n floor = data.floor.as_matrix()\n group = data.group.as_matrix()\n ufull = data.Uppm.as_matrix()\n\n with pm.Model() as model:\n groupmean = pm.Normal('groupmean', 0, 10. ** -2.)\n groupsd = pm.Uniform('groupsd', 0, 10.)\n sd = pm.Uniform('sd', 0, 10.)\n floor_m = pm.Normal('floor_m', 0, 5. ** -2.)\n u_m = pm.Normal('u_m', 0, 5. 
** -2)\n means = pm.Normal('means', groupmean, groupsd ** -2., shape=len(self.obs_means))\n pm.Normal('lr', floor * floor_m + means[group] + ufull * u_m, sd ** - 2.,\n observed=lradon)\n return model\n\n def too_slow(self):\n model = self.build_model()\n with model:\n start = pm.Point({\n 'groupmean': self.obs_means.mean(),\n 'groupsd_interval__': 0,\n 'sd_interval__': 0,\n 'means': np.array(self.obs_means),\n 'u_m': np.array([.72]),\n 'floor_m': 0.,\n })\n\n start = pm.find_MAP(start, model.vars[:-1])\n H = model.fastd2logp()\n h = np.diag(H(start))\n\n step = pm.HamiltonianMC(model.vars, h)\n pm.sample(50, step=step, start=start)\n\n\ndef build_disaster_model(masked=False):\n disasters_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,\n 3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,\n 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,\n 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,\n 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,\n 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\n if masked:\n disasters_data[[23, 68]] = -1\n disasters_data = np.ma.masked_values(disasters_data, value=-1)\n years = len(disasters_data)\n\n with pm.Model() as model:\n # Prior for distribution of switchpoint location\n switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=years)\n # Priors for pre- and post-switch mean number of disasters\n early_mean = pm.Exponential('early_mean', lam=1.)\n late_mean = pm.Exponential('late_mean', lam=1.)\n # Allocate appropriate Poisson rates to years before and after current\n # switchpoint location\n idx = np.arange(years)\n rate = tt.switch(switchpoint >= idx, early_mean, late_mean)\n # Data likelihood\n pm.Poisson('disasters', rate, observed=disasters_data)\n return model\n\n\[email protected](condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\nclass TestDisasterModel(SeededTest):\n # Time series of recorded coal mining disasters in the UK from 1851 to 1962\n def test_disaster_model(self):\n model = build_disaster_model(masked=False)\n with model:\n # Initial values for stochastic nodes\n start = {'early_mean': 2., 'late_mean': 3.}\n # Use slice sampler for means (other varibles auto-selected)\n step = pm.Slice([model.early_mean_log__, model.late_mean_log__])\n tr = pm.sample(500, tune=50, start=start, step=step)\n pm.summary(tr)\n\n def test_disaster_model_missing(self):\n model = build_disaster_model(masked=True)\n with model:\n # Initial values for stochastic nodes\n start = {'early_mean': 2., 'late_mean': 3.}\n # Use slice sampler for means (other varibles auto-selected)\n step = pm.Slice([model.early_mean_log__, model.late_mean_log__])\n tr = pm.sample(500, tune=50, start=start, step=step)\n pm.summary(tr)\n\n\nclass TestGLMLinear(SeededTest):\n def build_model(self):\n size = 50\n true_intercept = 1\n true_slope = 2\n self.x = np.linspace(0, 1, size)\n self.y = true_intercept + self.x * true_slope + np.random.normal(scale=.5, size=size)\n data = dict(x=self.x, y=self.y)\n with pm.Model() as model:\n pm.GLM.from_formula('y ~ x', data)\n return model\n\n def test_run(self):\n with self.build_model():\n start = pm.find_MAP(fmin=opt.fmin_powell)\n pm.sample(50, pm.Slice(), start=start)\n\n\nclass TestLatentOccupancy(SeededTest):\n \"\"\"\n From the PyMC example list\n latent_occupancy.py\n\n Simple model demonstrating the estimation of occupancy, using latent variables. Suppose\n a population of n sites, with some proportion pi being occupied. 
Each site is surveyed,\n yielding an array of counts, y:\n\n y = [3, 0, 0, 2, 1, 0, 1, 0, ..., ]\n\n This is a classic zero-inflated count problem, where more zeros appear in the data than would\n be predicted by a simple Poisson model. We have, in fact, a mixture of models; one, conditional\n on occupancy, with a poisson mean of theta, and another, conditional on absence, with mean zero.\n One way to tackle the problem is to model the latent state of 'occupancy' as a Bernoulli\n variable at each site, with some unknown probability:\n\n z_i ~ Bern(pi)\n\n These latent variables can then be used to generate an array of Poisson parameters:\n\n t_i = theta (if z_i=1) or 0 (if z_i=0)\n\n Hence, the likelihood is just:\n\n y_i = Poisson(t_i)\n\n (Note in this elementary model, we are ignoring the issue of imperfect detection.)\n\n Created by Chris Fonnesbeck on 2008-07-28.\n Copyright (c) 2008 University of Otago. All rights reserved.\n \"\"\"\n def setup_method(self):\n super(TestLatentOccupancy, self).setup_method()\n # Sample size\n n = 100\n # True mean count, given occupancy\n theta = 2.1\n # True occupancy\n pi = 0.4\n # Simulate some data data\n self.y = ((np.random.random(n) < pi) * np.random.poisson(lam=theta, size=n)).astype('int16')\n\n def build_model(self):\n with pm.Model() as model:\n # Estimated occupancy\n psi = pm.Beta('psi', 1, 1)\n # Latent variable for occupancy\n pm.Bernoulli('z', psi, self.y.shape)\n # Estimated mean count\n theta = pm.Uniform('theta', 0, 100)\n # Poisson likelihood\n pm.ZeroInflatedPoisson('y', theta, psi, observed=self.y)\n return model\n\n def test_run(self):\n model = self.build_model()\n with model:\n start = {'psi': 0.5, 'z': (self.y > 0).astype('int16'), 'theta': 5}\n step_one = pm.Metropolis([model.theta_interval__, model.psi_logodds__])\n step_two = pm.BinaryMetropolis([model.z])\n pm.sample(50, step=[step_one, step_two], start=start)\n\n\[email protected](condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32 due to starting inf at starting logP\")\nclass TestRSV(SeededTest):\n '''\n This model estimates the population prevalence of respiratory syncytial virus\n (RSV) among children in Amman, Jordan, based on 3 years of admissions diagnosed\n with RSV to Al Bashir hospital.\n\n To estimate this parameter from raw counts of diagnoses, we need to establish\n the population of 1-year-old children from which the diagnosed individuals\n were sampled. This involved correcting census data (national estimate of\n 1-year-olds) for the proportion of the population in the city, as well as for\n the market share of the hospital. The latter is based on expert esimate, and\n hence encoded as a prior.\n '''\n def build_model(self):\n # 1-year-old children in Jordan\n kids = np.array([180489, 191817, 190830])\n # Proportion of population in Amman\n amman_prop = 0.35\n # infant RSV cases in Al Bashir hostpital\n rsv_cases = np.array([40, 59, 65])\n with pm.Model() as model:\n # Al Bashir hospital market share\n market_share = pm.Uniform('market_share', 0.5, 0.6)\n # Number of 1 y.o. 
in Amman\n n_amman = pm.Binomial('n_amman', kids, amman_prop, shape=3)\n # Prior probability\n prev_rsv = pm.Beta('prev_rsv', 1, 5, shape=3)\n # RSV in Amman\n y_amman = pm.Binomial('y_amman', n_amman, prev_rsv, shape=3, testval=100)\n # Likelihood for number with RSV in hospital (assumes Pr(hosp | RSV) = 1)\n pm.Binomial('y_hosp', y_amman, market_share, observed=rsv_cases)\n return model\n\n def test_run(self):\n with self.build_model():\n pm.sample(50, step=[pm.NUTS(), pm.Metropolis()])\n", "import numpy as np\nimport theano.tensor as tt\nfrom functools import reduce\nfrom operator import mul, add\n\n__all__ = ['Constant',\n 'WhiteNoise',\n 'ExpQuad',\n 'RatQuad',\n 'Exponential',\n 'Matern52',\n 'Matern32',\n 'Linear',\n 'Polynomial',\n 'Cosine',\n 'Periodic',\n 'WarpedInput',\n 'Gibbs']\n\n\nclass Covariance(object):\n R\"\"\"\n Base class for all kernels/covariance functions.\n\n Parameters\n ----------\n input_dim : integer\n The number of input dimensions, or columns of X (or Xs)\n the kernel will operate on.\n active_dims : List of integers\n Indicate which dimension or column of X the covariance\n function operates on.\n \"\"\"\n\n def __init__(self, input_dim, active_dims=None):\n self.input_dim = input_dim\n if active_dims is None:\n self.active_dims = np.arange(input_dim)\n else:\n self.active_dims = np.asarray(active_dims, np.int)\n\n def __call__(self, X, Xs=None, diag=False):\n R\"\"\"\n Evaluate the kernel/covariance function.\n\n Parameters\n ----------\n X : The training inputs to the kernel.\n Xs : The optional prediction set of inputs the kernel.\n If Xs is None, Xs = X.\n diag: bool\n Return only the diagonal of the covariance function.\n Default is False.\n \"\"\"\n if diag:\n return self.diag(X)\n else:\n return self.full(X, Xs)\n\n def diag(self, X):\n raise NotImplementedError\n\n def full(self, X, Xs):\n raise NotImplementedError\n\n def _slice(self, X, Xs):\n X = tt.as_tensor_variable(X[:, self.active_dims])\n if Xs is not None:\n Xs = tt.as_tensor_variable(Xs[:, self.active_dims])\n return X, Xs\n\n def __add__(self, other):\n return Add([self, other])\n\n def __mul__(self, other):\n return Prod([self, other])\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __array_wrap__(self, result):\n \"\"\"\n Required to allow radd/rmul by numpy arrays.\n \"\"\"\n r, c = result.shape\n A = np.zeros((r, c))\n for i in range(r):\n for j in range(c):\n A[i, j] = result[i, j].factor_list[1]\n if isinstance(result[0][0], Add):\n return result[0][0].factor_list[0] + A\n elif isinstance(result[0][0], Prod):\n return result[0][0].factor_list[0] * A\n else:\n raise RuntimeError\n\n\nclass Combination(Covariance):\n def __init__(self, factor_list):\n input_dim = max([factor.input_dim for factor in factor_list\n if isinstance(factor, Covariance)])\n super(Combination, self).__init__(input_dim=input_dim)\n self.factor_list = []\n for factor in factor_list:\n if isinstance(factor, self.__class__):\n self.factor_list.extend(factor.factor_list)\n else:\n self.factor_list.append(factor)\n\n def merge_factors(self, X, Xs=None, diag=False):\n factor_list = []\n for factor in self.factor_list:\n # make sure diag=True is handled properly\n if isinstance(factor, Covariance):\n factor_list.append(factor(X, Xs, diag))\n elif isinstance(factor, np.ndarray):\n if np.ndim(factor) == 2 and diag:\n factor_list.append(np.diag(factor))\n else:\n factor_list.append(factor)\n elif isinstance(factor, (tt.TensorConstant,\n 
tt.TensorVariable,\n tt.sharedvar.TensorSharedVariable)):\n if factor.ndim == 2 and diag:\n factor_list.append(tt.diag(factor))\n else:\n factor_list.append(factor)\n else:\n factor_list.append(factor)\n return factor_list\n\n\nclass Add(Combination):\n def __call__(self, X, Xs=None, diag=False):\n return reduce(add, self.merge_factors(X, Xs, diag))\n\n\nclass Prod(Combination):\n def __call__(self, X, Xs=None, diag=False):\n return reduce(mul, self.merge_factors(X, Xs, diag))\n\n\nclass Constant(Covariance):\n R\"\"\"\n Constant valued covariance function.\n\n .. math::\n\n k(x, x') = c\n \"\"\"\n\n def __init__(self, c):\n super(Constant, self).__init__(1, None)\n self.c = c\n\n def diag(self, X):\n return tt.alloc(self.c, X.shape[0])\n\n def full(self, X, Xs=None):\n if Xs is None:\n return tt.alloc(self.c, X.shape[0], X.shape[0])\n else:\n return tt.alloc(self.c, X.shape[0], Xs.shape[0])\n\n\nclass WhiteNoise(Covariance):\n R\"\"\"\n White noise covariance function.\n\n .. math::\n\n k(x, x') = \\sigma^2 \\mathrm{I}\n \"\"\"\n\n def __init__(self, sigma):\n super(WhiteNoise, self).__init__(1, None)\n self.sigma = sigma\n\n def diag(self, X):\n return tt.alloc(tt.square(self.sigma), X.shape[0])\n\n def full(self, X, Xs=None):\n if Xs is None:\n return tt.diag(self.diag(X))\n else:\n return tt.alloc(0.0, X.shape[0], Xs.shape[0])\n\n\nclass Stationary(Covariance):\n R\"\"\"\n Base class for stationary kernels/covariance functions.\n\n Parameters\n ----------\n ls : Lengthscale. If input_dim > 1, a list or array of scalars or PyMC3 random\n variables. If input_dim == 1, a scalar or PyMC3 random variable.\n ls_inv : Inverse lengthscale. 1 / ls. One of ls or ls_inv must be provided.\n \"\"\"\n\n def __init__(self, input_dim, ls=None, ls_inv=None, active_dims=None):\n super(Stationary, self).__init__(input_dim, active_dims)\n if (ls is None and ls_inv is None) or (ls is not None and ls_inv is not None):\n raise ValueError(\"Only one of 'ls' or 'ls_inv' must be provided\")\n elif ls_inv is not None:\n if isinstance(ls_inv, (list, tuple)):\n ls = 1.0 / np.asarray(ls_inv)\n else:\n ls = 1.0 / ls_inv\n self.ls = tt.as_tensor_variable(ls)\n\n def square_dist(self, X, Xs):\n X = tt.mul(X, 1.0 / self.ls)\n X2 = tt.sum(tt.square(X), 1)\n if Xs is None:\n sqd = (-2.0 * tt.dot(X, tt.transpose(X))\n + (tt.reshape(X2, (-1, 1)) + tt.reshape(X2, (1, -1))))\n else:\n Xs = tt.mul(Xs, 1.0 / self.ls)\n Xs2 = tt.sum(tt.square(Xs), 1)\n sqd = (-2.0 * tt.dot(X, tt.transpose(Xs))\n + (tt.reshape(X2, (-1, 1)) + tt.reshape(Xs2, (1, -1))))\n return tt.clip(sqd, 0.0, np.inf)\n\n def euclidean_dist(self, X, Xs):\n r2 = self.square_dist(X, Xs)\n return tt.sqrt(r2 + 1e-12)\n\n def diag(self, X):\n return tt.alloc(1.0, X.shape[0])\n\n def full(self, X, Xs=None):\n raise NotImplementedError\n\n\nclass Periodic(Stationary):\n R\"\"\"\n The Periodic kernel.\n\n .. math::\n k(x, x') = \\mathrm{exp}\\left( -\\frac{2 \\mathrm{sin}^2(\\pi |x-x'| \\frac{1}{T})}{\\ell^2} \\right)\n \"\"\"\n\n def __init__(self, input_dim, period, ls=None, ls_inv=None, active_dims=None):\n super(Periodic, self).__init__(input_dim, ls, ls_inv, active_dims)\n self.period = period\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n if Xs is None:\n Xs = X\n f1 = X.dimshuffle(0, 'x', 1)\n f2 = Xs.dimshuffle('x', 0, 1)\n r = np.pi * (f1 - f2) / self.period\n r = tt.sum(tt.square(tt.sin(r) / self.ls), 2)\n return tt.exp(-0.5 * r)\n\n\nclass ExpQuad(Stationary):\n R\"\"\"\n The Exponentiated Quadratic kernel. 
Also refered to as the Squared\n Exponential, or Radial Basis Function kernel.\n\n .. math::\n\n k(x, x') = \\mathrm{exp}\\left[ -\\frac{(x - x')^2}{2 \\ell^2} \\right]\n \"\"\"\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n return tt.exp(-0.5 * self.square_dist(X, Xs))\n\n\nclass RatQuad(Stationary):\n R\"\"\"\n The Rational Quadratic kernel.\n\n .. math::\n\n k(x, x') = \\left(1 + \\frac{(x - x')^2}{2\\alpha\\ell^2} \\right)^{-\\alpha}\n \"\"\"\n\n def __init__(self, input_dim, alpha, ls=None, ls_inv=None, active_dims=None):\n super(RatQuad, self).__init__(input_dim, ls, ls_inv, active_dims)\n self.alpha = alpha\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n return (tt.power((1.0 + 0.5 * self.square_dist(X, Xs)\n * (1.0 / self.alpha)), -1.0 * self.alpha))\n\n\nclass Matern52(Stationary):\n R\"\"\"\n The Matern kernel with nu = 5/2.\n\n .. math::\n\n k(x, x') = \\left(1 + \\frac{\\sqrt{5(x - x')^2}}{\\ell} +\n \\frac{5(x-x')^2}{3\\ell^2}\\right)\n \\mathrm{exp}\\left[ - \\frac{\\sqrt{5(x - x')^2}}{\\ell} \\right]\n \"\"\"\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n r = self.euclidean_dist(X, Xs)\n return ((1.0 + np.sqrt(5.0) * r + 5.0 / 3.0 * tt.square(r))\n * tt.exp(-1.0 * np.sqrt(5.0) * r))\n\n\nclass Matern32(Stationary):\n R\"\"\"\n The Matern kernel with nu = 3/2.\n\n .. math::\n\n k(x, x') = \\left(1 + \\frac{\\sqrt{3(x - x')^2}}{\\ell}\\right)\n \\mathrm{exp}\\left[ - \\frac{\\sqrt{3(x - x')^2}}{\\ell} \\right]\n \"\"\"\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n r = self.euclidean_dist(X, Xs)\n return (1.0 + np.sqrt(3.0) * r) * tt.exp(-np.sqrt(3.0) * r)\n\n\nclass Exponential(Stationary):\n R\"\"\"\n The Exponential kernel.\n\n .. math::\n\n k(x, x') = \\mathrm{exp}\\left[ -\\frac{||x - x'||}{2\\ell^2} \\right]\n \"\"\"\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n return tt.exp(-0.5 * self.euclidean_dist(X, Xs))\n\n\nclass Cosine(Stationary):\n R\"\"\"\n The Cosine kernel.\n\n .. math::\n k(x, x') = \\mathrm{cos}\\left( \\pi \\frac{||x - x'||}{ \\ell^2} \\right)\n \"\"\"\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n return tt.cos(2.0 * np.pi * self.euclidean_dist(X, Xs))\n\n\nclass Linear(Covariance):\n R\"\"\"\n The Linear kernel.\n\n .. math::\n k(x, x') = (x - c)(x' - c)\n \"\"\"\n\n def __init__(self, input_dim, c, active_dims=None):\n super(Linear, self).__init__(input_dim, active_dims)\n self.c = c\n\n def _common(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n Xc = tt.sub(X, self.c)\n return X, Xc, Xs\n\n def full(self, X, Xs=None):\n X, Xc, Xs = self._common(X, Xs)\n if Xs is None:\n return tt.dot(Xc, tt.transpose(Xc))\n else:\n Xsc = tt.sub(Xs, self.c)\n return tt.dot(Xc, tt.transpose(Xsc))\n\n def diag(self, X):\n X, Xc, _ = self._common(X, None)\n return tt.sum(tt.square(Xc), 1)\n\n\nclass Polynomial(Linear):\n R\"\"\"\n The Polynomial kernel.\n\n .. math::\n k(x, x') = [(x - c)(x' - c) + \\mathrm{offset}]^{d}\n \"\"\"\n\n def __init__(self, input_dim, c, d, offset, active_dims=None):\n super(Polynomial, self).__init__(input_dim, c, active_dims)\n self.d = d\n self.offset = offset\n\n def full(self, X, Xs=None):\n linear = super(Polynomial, self).full(X, Xs)\n return tt.power(linear + self.offset, self.d)\n\n def diag(self, X):\n linear = super(Polynomial, self).diag(X)\n return tt.power(linear + self.offset, self.d)\n\n\nclass WarpedInput(Covariance):\n R\"\"\"\n Warp the inputs of any kernel using an arbitrary function\n defined using Theano.\n\n .. 
math::\n k(x, x') = k(w(x), w(x'))\n\n Parameters\n ----------\n cov_func : Covariance\n warp_func : callable\n Theano function of X and additional optional arguments.\n args : optional, tuple or list of scalars or PyMC3 variables\n Additional inputs (besides X or Xs) to warp_func.\n \"\"\"\n\n def __init__(self, input_dim, cov_func, warp_func, args=None,\n active_dims=None):\n super(WarpedInput, self).__init__(input_dim, active_dims)\n if not callable(warp_func):\n raise TypeError(\"warp_func must be callable\")\n if not isinstance(cov_func, Covariance):\n raise TypeError(\"Must be or inherit from the Covariance class\")\n self.w = handle_args(warp_func, args)\n self.args = args\n self.cov_func = cov_func\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n if Xs is None:\n return self.cov_func(self.w(X, self.args), Xs)\n else:\n return self.cov_func(self.w(X, self.args), self.w(Xs, self.args))\n\n def diag(self, X):\n X, _ = self._slice(X, None)\n return self.cov_func(self.w(X, self.args), diag=True)\n\n\nclass Gibbs(Covariance):\n R\"\"\"\n The Gibbs kernel. Use an arbitrary lengthscale function defined\n using Theano. Only tested in one dimension.\n\n .. math::\n k(x, x') = \\sqrt{\\frac{2\\ell(x)\\ell(x')}{\\ell^2(x) + \\ell^2(x')}}\n \\mathrm{exp}\\left[ -\\frac{(x - x')^2}\n {\\ell^2(x) + \\ell^2(x')} \\right]\n\n Parameters\n ----------\n lengthscale_func : callable\n Theano function of X and additional optional arguments.\n args : optional, tuple or list of scalars or PyMC3 variables\n Additional inputs (besides X or Xs) to lengthscale_func.\n \"\"\"\n\n def __init__(self, input_dim, lengthscale_func, args=None,\n active_dims=None):\n super(Gibbs, self).__init__(input_dim, active_dims)\n if active_dims is not None:\n if input_dim != 1 or sum(active_dims) == 1:\n raise NotImplementedError((\"Higher dimensional inputs \",\n \"are untested\"))\n else:\n if input_dim != 1:\n raise NotImplementedError((\"Higher dimensional inputs \",\n \"are untested\"))\n if not callable(lengthscale_func):\n raise TypeError(\"lengthscale_func must be callable\")\n self.lfunc = handle_args(lengthscale_func, args)\n self.args = args\n\n def square_dist(self, X, Xs):\n X2 = tt.sum(tt.square(X), 1)\n if Xs is None:\n sqd = (-2.0 * tt.dot(X, tt.transpose(X))\n + (tt.reshape(X2, (-1, 1)) + tt.reshape(X2, (1, -1))))\n else:\n Xs2 = tt.sum(tt.square(Xs), 1)\n sqd = (-2.0 * tt.dot(X, tt.transpose(Xs))\n + (tt.reshape(Xs2, (-1, 1)) + tt.reshape(Xs2, (1, -1))))\n return tt.clip(sqd, 0.0, np.inf)\n\n def full(self, X, Xs=None):\n X, Xs = self._slice(X, Xs)\n rx = self.lfunc(X, self.args)\n rx2 = tt.reshape(tt.square(rx), (-1, 1))\n if Xs is None:\n r2 = self.square_dist(X, X)\n rz = self.lfunc(X, self.args)\n else:\n r2 = self.square_dist(X, Xs)\n rz = self.lfunc(Xs, self.args)\n rz2 = tt.reshape(tt.square(rz), (1, -1))\n return (tt.sqrt((2.0 * tt.dot(rx, tt.transpose(rz))) / (rx2 + rz2))\n * tt.exp(-1.0 * r2 / (rx2 + rz2)))\n\n def diag(self, X):\n return tt.alloc(1.0, X.shape[0])\n\ndef handle_args(func, args):\n def f(x, args):\n if args is None:\n return func(x)\n else:\n if not isinstance(args, tuple):\n args = (args,)\n return func(x, *args)\n return f\n" ]
[ [ "numpy.linspace", "scipy.cluster.vq.kmeans", "numpy.asarray", "numpy.min", "matplotlib.pyplot.get_cmap", "numpy.percentile", "numpy.int", "numpy.std", "numpy.max", "numpy.random.randint" ], [ "numpy.random.random", "numpy.linspace", "numpy.arange", "matplotlib.use", "numpy.random.poisson", "numpy.random.normal", "numpy.ma.masked_values", "numpy.array", "numpy.where" ], [ "numpy.diag", "numpy.sqrt", "numpy.asarray", "numpy.arange", "numpy.ndim", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
willie3838/pandas
[ "daec2e73b48fe34086dcdeeb0858d29536b6ca6a" ]
[ "pandas/core/indexes/multi.py" ]
[ "from __future__ import annotations\n\nfrom functools import wraps\nfrom sys import getsizeof\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Collection,\n Hashable,\n Iterable,\n List,\n Sequence,\n Tuple,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import (\n algos as libalgos,\n index as libindex,\n lib,\n)\nfrom pandas._libs.hashtable import duplicated\nfrom pandas._typing import (\n AnyArrayLike,\n DtypeObj,\n Scalar,\n Shape,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n InvalidIndexError,\n PerformanceWarning,\n UnsortedIndexError,\n)\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n deprecate_nonkeyword_arguments,\n doc,\n)\n\nfrom pandas.core.dtypes.cast import coerce_indexer_dtype\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n is_categorical_dtype,\n is_hashable,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.missing import (\n array_equivalent,\n isna,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.arrays.categorical import factorize_from_iterables\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n ensure_index,\n get_unanimous_names,\n)\nfrom pandas.core.indexes.frozen import FrozenList\nfrom pandas.core.indexes.numeric import Int64Index\nfrom pandas.core.ops.invalid import make_invalid_op\nfrom pandas.core.sorting import (\n get_group_index,\n indexer_from_factorized,\n lexsort_indexer,\n)\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas import (\n CategoricalIndex,\n DataFrame,\n Series,\n )\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n {\"klass\": \"MultiIndex\", \"target_klass\": \"MultiIndex or list of tuples\"}\n)\n\n\nclass MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):\n \"\"\"\n This class manages a MultiIndex by mapping label combinations to positive\n integers.\n \"\"\"\n\n _base = libindex.UInt64Engine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one uint64 (each), in a strictly\n monotonic way (i.e. respecting the lexicographic order of integer\n combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n scalar or 1-dimensional array, of dtype uint64\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits:\n codes <<= self.offsets\n\n # Now sum and OR are in fact interchangeable. 
This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer:\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\nclass MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):\n \"\"\"\n This class manages those (extreme) cases in which the number of possible\n label combinations overflows the 64 bits integers, and uses an ObjectEngine\n containing Python integers.\n \"\"\"\n\n _base = libindex.ObjectEngine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one Python integer (each), in a\n strictly monotonic way (i.e. respecting the lexicographic order of\n integer combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n int, or 1-dimensional array of dtype object\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits. Since this can overflow uint64, first make sure we are\n # working with Python integers:\n codes = codes.astype(\"object\") << self.offsets\n\n # Now sum and OR are in fact interchangeable. This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer (per row):\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\ndef names_compat(meth):\n \"\"\"\n A decorator to allow either `name` or `names` keyword but not both.\n\n This makes it easier to share code with base class.\n \"\"\"\n\n @wraps(meth)\n def new_meth(self_or_cls, *args, **kwargs):\n if \"name\" in kwargs and \"names\" in kwargs:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif \"name\" in kwargs:\n kwargs[\"names\"] = kwargs.pop(\"name\")\n\n return meth(self_or_cls, *args, **kwargs)\n\n return new_meth\n\n\nclass MultiIndex(Index):\n \"\"\"\n A multi-level, or hierarchical, index object for pandas objects.\n\n Parameters\n ----------\n levels : sequence of arrays\n The unique labels for each level.\n codes : sequence of arrays\n Integers for each level designating which label at each location.\n sortorder : optional int\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : optional sequence of objects\n Names for each of the index levels. 
(name is accepted for compat).\n copy : bool, default False\n Copy the meta-data.\n verify_integrity : bool, default True\n Check that the levels/codes are consistent and valid.\n\n Attributes\n ----------\n names\n levels\n codes\n nlevels\n levshape\n\n Methods\n -------\n from_arrays\n from_tuples\n from_product\n from_frame\n set_levels\n set_codes\n to_frame\n to_flat_index\n sortlevel\n droplevel\n swaplevel\n reorder_levels\n remove_unused_levels\n get_locs\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Create a MultiIndex from the cartesian product\n of iterables.\n MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n Index : The base pandas Index type.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__\n for more.\n\n Examples\n --------\n A new ``MultiIndex`` is typically constructed using one of the helper\n methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`\n and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):\n\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n\n See further examples for how to construct a MultiIndex in the doc strings\n of the mentioned helper methods.\n \"\"\"\n\n _hidden_attrs = Index._hidden_attrs | frozenset()\n\n # initialize to zero-length tuples to make everything work\n _typ = \"multiindex\"\n _names = FrozenList()\n _levels = FrozenList()\n _codes = FrozenList()\n _comparables = [\"names\"]\n\n sortorder: int | None\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n levels=None,\n codes=None,\n sortorder=None,\n names=None,\n dtype=None,\n copy=False,\n name=None,\n verify_integrity: bool = True,\n ):\n\n # compat with Index\n if name is not None:\n names = name\n if levels is None or codes is None:\n raise TypeError(\"Must pass both levels and codes\")\n if len(levels) != len(codes):\n raise ValueError(\"Length of levels and codes must be the same.\")\n if len(levels) == 0:\n raise ValueError(\"Must pass non-zero number of levels/codes\")\n\n result = object.__new__(cls)\n result._cache = {}\n\n # we've already validated levels and codes, so shortcut here\n result._set_levels(levels, copy=copy, validate=False)\n result._set_codes(codes, copy=copy, validate=False)\n\n result._names = [None] * len(levels)\n if names is not None:\n # handles name validation\n result._set_names(names)\n\n if sortorder is not None:\n result.sortorder = int(sortorder)\n else:\n result.sortorder = sortorder\n\n if verify_integrity:\n new_codes = result._verify_integrity()\n result._codes = new_codes\n\n result._reset_identity()\n\n return result\n\n def _validate_codes(self, level: list, code: list):\n \"\"\"\n Reassign code values as -1 if their corresponding levels are NaN.\n\n Parameters\n ----------\n code : list\n Code to reassign.\n level : list\n Level to check for missing values (NaN, NaT, None).\n\n Returns\n -------\n new code where code value = -1 if it corresponds\n to a level with missing values (NaN, NaT, None).\n \"\"\"\n null_mask = isna(level)\n if np.any(null_mask):\n code = np.where(null_mask[code], -1, code)\n return code\n\n def _verify_integrity(self, codes: 
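A short usage sketch of the constructor documented above, passing `levels` and `codes` directly (the helper classmethods further down are the more common entry points):

import pandas as pd

mi = pd.MultiIndex(
    levels=[["a", "b"], [1, 2]],
    codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
    names=["letter", "number"],
)
print(mi)          # ('a', 1), ('a', 2), ('b', 1), ('b', 2) with names ['letter', 'number']
print(mi.nlevels)  # 2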
list | None = None, levels: list | None = None):\n \"\"\"\n Parameters\n ----------\n codes : optional list\n Codes to check for validity. Defaults to current codes.\n levels : optional list\n Levels to check for validity. Defaults to current levels.\n\n Raises\n ------\n ValueError\n If length of levels and codes don't match, if the codes for any\n level would exceed level bounds, or there are any duplicate levels.\n\n Returns\n -------\n new codes where code value = -1 if it corresponds to a\n NaN level.\n \"\"\"\n # NOTE: Currently does not check, among other things, that cached\n # nlevels matches nor that sortorder matches actually sortorder.\n codes = codes or self.codes\n levels = levels or self.levels\n\n if len(levels) != len(codes):\n raise ValueError(\n \"Length of levels and codes must match. NOTE: \"\n \"this index is in an inconsistent state.\"\n )\n codes_length = len(codes[0])\n for i, (level, level_codes) in enumerate(zip(levels, codes)):\n if len(level_codes) != codes_length:\n raise ValueError(\n f\"Unequal code lengths: {[len(code_) for code_ in codes]}\"\n )\n if len(level_codes) and level_codes.max() >= len(level):\n raise ValueError(\n f\"On level {i}, code max ({level_codes.max()}) >= length of \"\n f\"level ({len(level)}). NOTE: this index is in an \"\n \"inconsistent state\"\n )\n if len(level_codes) and level_codes.min() < -1:\n raise ValueError(f\"On level {i}, code value ({level_codes.min()}) < -1\")\n if not level.is_unique:\n raise ValueError(\n f\"Level values must be unique: {list(level)} on level {i}\"\n )\n if self.sortorder is not None:\n if self.sortorder > _lexsort_depth(self.codes, self.nlevels):\n raise ValueError(\n \"Value for sortorder must be inferior or equal to actual \"\n f\"lexsort_depth: sortorder {self.sortorder} \"\n f\"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}\"\n )\n\n codes = [\n self._validate_codes(level, code) for level, code in zip(levels, codes)\n ]\n new_codes = FrozenList(codes)\n return new_codes\n\n @classmethod\n def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:\n \"\"\"\n Convert arrays to MultiIndex.\n\n Parameters\n ----------\n arrays : list / sequence of array-likes\n Each array-like gives one level's value for each data point.\n len(arrays) is the number of levels.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n error_msg = \"Input must be a list / sequence of array-likes.\"\n if not is_list_like(arrays):\n raise TypeError(error_msg)\n elif is_iterator(arrays):\n arrays = list(arrays)\n\n # Check if elements of array are list-like\n for array in arrays:\n if not is_list_like(array):\n raise TypeError(error_msg)\n\n # Check if lengths of all arrays are equal or not,\n # raise ValueError, if not\n for i in range(1, len(arrays)):\n if len(arrays[i]) != len(arrays[i - 1]):\n raise ValueError(\"all arrays must be same 
length\")\n\n codes, levels = factorize_from_iterables(arrays)\n if names is lib.no_default:\n names = [getattr(arr, \"name\", None) for arr in arrays]\n\n return cls(\n levels=levels,\n codes=codes,\n sortorder=sortorder,\n names=names,\n verify_integrity=False,\n )\n\n @classmethod\n @names_compat\n def from_tuples(\n cls,\n tuples: Iterable[tuple[Hashable, ...]],\n sortorder: int | None = None,\n names: Sequence[Hashable] | None = None,\n ) -> MultiIndex:\n \"\"\"\n Convert list of tuples to MultiIndex.\n\n Parameters\n ----------\n tuples : list / sequence of tuple-likes\n Each tuple is the index of one row/column.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> tuples = [(1, 'red'), (1, 'blue'),\n ... (2, 'red'), (2, 'blue')]\n >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n if not is_list_like(tuples):\n raise TypeError(\"Input must be a list / sequence of tuple-likes.\")\n elif is_iterator(tuples):\n tuples = list(tuples)\n tuples = cast(Collection[Tuple[Hashable, ...]], tuples)\n\n arrays: list[Sequence[Hashable]]\n if len(tuples) == 0:\n if names is None:\n raise TypeError(\"Cannot infer number of levels from empty list\")\n arrays = [[]] * len(names)\n elif isinstance(tuples, (np.ndarray, Index)):\n if isinstance(tuples, Index):\n tuples = np.asarray(tuples._values)\n\n arrays = list(lib.tuples_to_object_array(tuples).T)\n elif isinstance(tuples, list):\n arrays = list(lib.to_object_array_tuples(tuples).T)\n else:\n arrs = zip(*tuples)\n arrays = cast(List[Sequence[Hashable]], arrs)\n\n return cls.from_arrays(arrays, sortorder=sortorder, names=names)\n\n @classmethod\n def from_product(\n cls, iterables, sortorder=None, names=lib.no_default\n ) -> MultiIndex:\n \"\"\"\n Make a MultiIndex from the cartesian product of multiple iterables.\n\n Parameters\n ----------\n iterables : list / sequence of iterables\n Each iterable has unique labels for each level of the index.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n .. versionchanged:: 1.0.0\n\n If not explicitly provided, names will be inferred from the\n elements of iterables if an element has a name attribute\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> numbers = [0, 1, 2]\n >>> colors = ['green', 'purple']\n >>> pd.MultiIndex.from_product([numbers, colors],\n ... 
names=['number', 'color'])\n MultiIndex([(0, 'green'),\n (0, 'purple'),\n (1, 'green'),\n (1, 'purple'),\n (2, 'green'),\n (2, 'purple')],\n names=['number', 'color'])\n \"\"\"\n from pandas.core.reshape.util import cartesian_product\n\n if not is_list_like(iterables):\n raise TypeError(\"Input must be a list / sequence of iterables.\")\n elif is_iterator(iterables):\n iterables = list(iterables)\n\n codes, levels = factorize_from_iterables(iterables)\n if names is lib.no_default:\n names = [getattr(it, \"name\", None) for it in iterables]\n\n # codes are all ndarrays, so cartesian_product is lossless\n codes = cartesian_product(codes)\n return cls(levels, codes, sortorder=sortorder, names=names)\n\n @classmethod\n def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:\n \"\"\"\n Make a MultiIndex from a DataFrame.\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to be converted to MultiIndex.\n sortorder : int, optional\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list-like, optional\n If no names are provided, use the column names, or tuple of column\n names if the columns is a MultiIndex. If a sequence, overwrite\n names with the given sequence.\n\n Returns\n -------\n MultiIndex\n The MultiIndex representation of the given DataFrame.\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n\n Examples\n --------\n >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],\n ... ['NJ', 'Temp'], ['NJ', 'Precip']],\n ... columns=['a', 'b'])\n >>> df\n a b\n 0 HI Temp\n 1 HI Precip\n 2 NJ Temp\n 3 NJ Precip\n\n >>> pd.MultiIndex.from_frame(df)\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['a', 'b'])\n\n Using explicit names, instead of the column names\n\n >>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['state', 'observation'])\n \"\"\"\n if not isinstance(df, ABCDataFrame):\n raise TypeError(\"Input must be a DataFrame\")\n\n column_names, columns = zip(*df.items())\n names = column_names if names is None else names\n return cls.from_arrays(columns, sortorder=sortorder, names=names)\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def _values(self) -> np.ndarray:\n # We override here, since our parent uses _data, which we don't use.\n values = []\n\n for i in range(self.nlevels):\n vals = self._get_level_values(i)\n if is_categorical_dtype(vals.dtype):\n vals = cast(\"CategoricalIndex\", vals)\n vals = vals._data._internal_get_values()\n if isinstance(vals.dtype, ExtensionDtype) or isinstance(\n vals, (ABCDatetimeIndex, ABCTimedeltaIndex)\n ):\n vals = vals.astype(object)\n # error: Incompatible types in assignment (expression has type \"ndarray\",\n # variable has type \"Index\")\n vals = np.array(vals, copy=False) # type: ignore[assignment]\n values.append(vals)\n\n arr = lib.fast_zip(values)\n return arr\n\n @property\n def values(self) -> np.ndarray:\n return self._values\n\n @property\n def array(self):\n \"\"\"\n Raises a ValueError for `MultiIndex` because there's no single\n array backing a MultiIndex.\n\n Raises\n ------\n ValueError\n \"\"\"\n raise ValueError(\n \"MultiIndex has no single backing array. 
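A small sketch of `from_frame` and of the `values`/`array` behaviour described above: `values` materialises an object array of tuples, while `array` deliberately raises because there is no single backing array:

import pandas as pd

df = pd.DataFrame({"state": ["HI", "HI", "NJ"], "obs": ["Temp", "Precip", "Temp"]})
mi = pd.MultiIndex.from_frame(df)

print(mi.values[:2])  # object array of tuples, e.g. [('HI', 'Temp') ('HI', 'Precip')]
try:
    mi.array
except ValueError as err:
    print(err)        # points the user at MultiIndex.to_numpy() instead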
Use \"\n \"'MultiIndex.to_numpy()' to get a NumPy array of tuples.\"\n )\n\n @cache_readonly\n def dtypes(self) -> Series:\n \"\"\"\n Return the dtypes as a Series for the underlying MultiIndex\n \"\"\"\n from pandas import Series\n\n return Series(\n {\n f\"level_{idx}\" if level.name is None else level.name: level.dtype\n for idx, level in enumerate(self.levels)\n }\n )\n\n def __len__(self) -> int:\n return len(self.codes[0])\n\n # --------------------------------------------------------------------\n # Levels Methods\n\n @cache_readonly\n def levels(self) -> FrozenList:\n # Use cache_readonly to ensure that self.get_locs doesn't repeatedly\n # create new IndexEngine\n # https://github.com/pandas-dev/pandas/issues/31648\n result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]\n for level in result:\n # disallow midx.levels[0].name = \"foo\"\n level._no_setting_name = True\n return FrozenList(result)\n\n def _set_levels(\n self,\n levels,\n *,\n level=None,\n copy: bool = False,\n validate: bool = True,\n verify_integrity: bool = False,\n ) -> None:\n # This is NOT part of the levels property because it should be\n # externally not allowed to set levels. User beware if you change\n # _levels directly\n if validate:\n if len(levels) == 0:\n raise ValueError(\"Must set non-zero number of levels.\")\n if level is None and len(levels) != self.nlevels:\n raise ValueError(\"Length of levels must match number of levels.\")\n if level is not None and len(levels) != len(level):\n raise ValueError(\"Length of levels must match length of level.\")\n\n if level is None:\n new_levels = FrozenList(\n ensure_index(lev, copy=copy)._view() for lev in levels\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_levels_list = list(self._levels)\n for lev_num, lev in zip(level_numbers, levels):\n new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()\n new_levels = FrozenList(new_levels_list)\n\n if verify_integrity:\n new_codes = self._verify_integrity(levels=new_levels)\n self._codes = new_codes\n\n names = self.names\n self._levels = new_levels\n if any(names):\n self._set_names(names)\n\n self._reset_cache()\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"levels\"])\n def set_levels(\n self, levels, level=None, inplace=None, verify_integrity: bool = True\n ):\n \"\"\"\n Set new levels on MultiIndex. Defaults to returning new index.\n\n Parameters\n ----------\n levels : sequence or list of sequence\n New level(s) to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n\n .. deprecated:: 1.2.0\n verify_integrity : bool, default True\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc) or None\n The same type as the caller or None if ``inplace=True``.\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples(\n ... [\n ... (1, \"one\"),\n ... (1, \"two\"),\n ... (2, \"one\"),\n ... (2, \"two\"),\n ... (3, \"one\"),\n ... (3, \"two\")\n ... ],\n ... names=[\"foo\", \"bar\"]\n ... 
)\n >>> idx\n MultiIndex([(1, 'one'),\n (1, 'two'),\n (2, 'one'),\n (2, 'two'),\n (3, 'one'),\n (3, 'two')],\n names=['foo', 'bar'])\n\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2]])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2),\n ('c', 1),\n ('c', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b', 'c'], level=0)\n MultiIndex([('a', 'one'),\n ('a', 'two'),\n ('b', 'one'),\n ('b', 'two'),\n ('c', 'one'),\n ('c', 'two')],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b'], level='bar')\n MultiIndex([(1, 'a'),\n (1, 'b'),\n (2, 'a'),\n (2, 'b'),\n (3, 'a'),\n (3, 'b')],\n names=['foo', 'bar'])\n\n If any of the levels passed to ``set_levels()`` exceeds the\n existing length, all of the values from that argument will\n be stored in the MultiIndex levels, though the values will\n be truncated in the MultiIndex output.\n\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2),\n ('c', 1),\n ('c', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels\n FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])\n \"\"\"\n if inplace is not None:\n warnings.warn(\n \"inplace is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=3,\n )\n else:\n inplace = False\n\n if is_list_like(levels) and not isinstance(levels, Index):\n levels = list(levels)\n\n level, levels = _require_listlike(level, levels, \"Levels\")\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n idx._reset_identity()\n idx._set_levels(\n levels, level=level, validate=True, verify_integrity=verify_integrity\n )\n if not inplace:\n return idx\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Integer number of levels in this MultiIndex.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])\n >>> mi\n MultiIndex([('a', 'b', 'c')],\n )\n >>> mi.nlevels\n 3\n \"\"\"\n return len(self._levels)\n\n @property\n def levshape(self) -> Shape:\n \"\"\"\n A tuple with the length of each level.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])\n >>> mi\n MultiIndex([('a', 'b', 'c')],\n )\n >>> mi.levshape\n (1, 1, 1)\n \"\"\"\n return tuple(len(x) for x in self.levels)\n\n # --------------------------------------------------------------------\n # Codes Methods\n\n @property\n def codes(self):\n return self._codes\n\n def _set_codes(\n self,\n codes,\n *,\n level=None,\n copy: bool = False,\n validate: bool = True,\n verify_integrity: bool = False,\n ) -> None:\n if validate:\n if level is None and len(codes) != self.nlevels:\n raise ValueError(\"Length of codes must match number of levels\")\n if level is not None and len(codes) != len(level):\n raise ValueError(\"Length of codes must match length of levels.\")\n\n if level is None:\n new_codes = FrozenList(\n _coerce_indexer_frozen(level_codes, lev, copy=copy).view()\n for lev, level_codes in zip(self._levels, codes)\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_codes_list = list(self._codes)\n for lev_num, level_codes in zip(level_numbers, codes):\n lev = self.levels[lev_num]\n new_codes_list[lev_num] = _coerce_indexer_frozen(\n level_codes, lev, copy=copy\n )\n new_codes = FrozenList(new_codes_list)\n\n if verify_integrity:\n new_codes = self._verify_integrity(codes=new_codes)\n\n self._codes = new_codes\n\n self._reset_cache()\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"codes\"])\n def 
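A usage sketch of `set_levels` as documented above; with the deprecated `inplace` left untouched it returns a new index and leaves the original alone:

import pandas as pd

mi = pd.MultiIndex.from_tuples(
    [(1, "one"), (1, "two"), (2, "one")], names=["foo", "bar"]
)
relabelled = mi.set_levels(["a", "b"], level="foo")
print(relabelled.levels[0])  # Index(['a', 'b'], ..., name='foo')
print(mi.levels[0])          # the original integer level is unchanged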
set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):\n \"\"\"\n Set new codes on MultiIndex. Defaults to returning new index.\n\n Parameters\n ----------\n codes : sequence or list of sequence\n New codes to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n\n .. deprecated:: 1.2.0\n verify_integrity : bool, default True\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc) or None\n The same type as the caller or None if ``inplace=True``.\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples(\n ... [(1, \"one\"), (1, \"two\"), (2, \"one\"), (2, \"two\")], names=[\"foo\", \"bar\"]\n ... )\n >>> idx\n MultiIndex([(1, 'one'),\n (1, 'two'),\n (2, 'one'),\n (2, 'two')],\n names=['foo', 'bar'])\n\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([1, 0, 1, 0], level=0)\n MultiIndex([(2, 'one'),\n (1, 'two'),\n (2, 'one'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([0, 0, 1, 1], level='bar')\n MultiIndex([(1, 'one'),\n (1, 'one'),\n (2, 'two'),\n (2, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n \"\"\"\n if inplace is not None:\n warnings.warn(\n \"inplace is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=3,\n )\n else:\n inplace = False\n\n level, codes = _require_listlike(level, codes, \"Codes\")\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n idx._reset_identity()\n idx._set_codes(codes, level=level, verify_integrity=verify_integrity)\n if not inplace:\n return idx\n\n # --------------------------------------------------------------------\n # Index Internals\n\n @cache_readonly\n def _engine(self):\n # Calculate the number of bits needed to represent labels in each\n # level, as log2 of their sizes (including -1 for NaN):\n sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))\n\n # Sum bit counts, starting from the _right_....\n lev_bits = np.cumsum(sizes[::-1])[::-1]\n\n # ... in order to obtain offsets such that sorting the combination of\n # shifted codes (one for each level, resulting in a unique integer) is\n # equivalent to sorting lexicographically the codes themselves. 
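The same non-mutating pattern applies to `set_codes`; a short sketch mirroring the docstring above:

import pandas as pd

mi = pd.MultiIndex.from_tuples(
    [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
)
flipped = mi.set_codes([1, 0, 1, 0], level=0)  # returns a new index
print(flipped)  # (2, 'one'), (1, 'two'), (2, 'one'), (1, 'two')
print(mi)       # unchanged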
Notice\n # that each level needs to be shifted by the number of bits needed to\n # represent the _previous_ ones:\n offsets = np.concatenate([lev_bits[1:], [0]]).astype(\"uint64\")\n\n # Check the total number of bits needed for our representation:\n if lev_bits[0] > 64:\n # The levels would overflow a 64 bit uint - use Python integers:\n return MultiIndexPyIntEngine(self.levels, self.codes, offsets)\n return MultiIndexUIntEngine(self.levels, self.codes, offsets)\n\n @property\n def _constructor(self) -> Callable[..., MultiIndex]:\n return type(self).from_tuples\n\n @doc(Index._shallow_copy)\n def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:\n names = name if name is not lib.no_default else self.names\n\n return type(self).from_tuples(values, sortorder=None, names=names)\n\n def _view(self) -> MultiIndex:\n result = type(self)(\n levels=self.levels,\n codes=self.codes,\n sortorder=self.sortorder,\n names=self.names,\n verify_integrity=False,\n )\n result._cache = self._cache.copy()\n result._cache.pop(\"levels\", None) # GH32669\n return result\n\n # --------------------------------------------------------------------\n\n def copy(\n self,\n names=None,\n dtype=None,\n levels=None,\n codes=None,\n deep=False,\n name=None,\n ):\n \"\"\"\n Make a copy of this object. Names, dtype, levels and codes can be\n passed and will be set on new copy.\n\n Parameters\n ----------\n names : sequence, optional\n dtype : numpy dtype or pandas type, optional\n\n .. deprecated:: 1.2.0\n levels : sequence, optional\n\n .. deprecated:: 1.2.0\n codes : sequence, optional\n\n .. deprecated:: 1.2.0\n deep : bool, default False\n name : Label\n Kept for compatibility with 1-dimensional Index. Should not be used.\n\n Returns\n -------\n MultiIndex\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n This could be potentially expensive on large MultiIndex objects.\n \"\"\"\n names = self._validate_names(name=name, names=names, deep=deep)\n if levels is not None:\n warnings.warn(\n \"parameter levels is deprecated and will be removed in a future \"\n \"version. Use the set_levels method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n if codes is not None:\n warnings.warn(\n \"parameter codes is deprecated and will be removed in a future \"\n \"version. Use the set_codes method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n if deep:\n from copy import deepcopy\n\n if levels is None:\n levels = deepcopy(self.levels)\n if codes is None:\n codes = deepcopy(self.codes)\n\n levels = levels if levels is not None else self.levels\n codes = codes if codes is not None else self.codes\n\n new_index = type(self)(\n levels=levels,\n codes=codes,\n sortorder=self.sortorder,\n names=names,\n verify_integrity=False,\n )\n new_index._cache = self._cache.copy()\n new_index._cache.pop(\"levels\", None) # GH32669\n\n if dtype:\n warnings.warn(\n \"parameter dtype is deprecated and will be removed in a future \"\n \"version. 
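The bit-budget computation in `_engine` above can be sketched on its own; the level sizes here are hypothetical, and the point is only how the offsets are derived and when the 64-bit engine no longer fits:

import numpy as np

level_sizes = [4, 1000, 70000]  # hypothetical number of labels per level
sizes = np.ceil(np.log2([n + 1 for n in level_sizes]))  # bits per level (+1 accounts for NaN)
lev_bits = np.cumsum(sizes[::-1])[::-1]                 # cumulative bit counts from the right
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")

print(sizes)              # [ 3. 10. 17.]
print(offsets)            # [27 17  0] -- shift applied to each level's codes
print(lev_bits[0] <= 64)  # True: the uint64 engine suffices; otherwise Python ints are used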
Use the astype method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n new_index = new_index.astype(dtype)\n return new_index\n\n def __array__(self, dtype=None) -> np.ndarray:\n \"\"\"the array interface, return my values\"\"\"\n return self.values\n\n def view(self, cls=None):\n \"\"\"this is defined as a copy with the same identity\"\"\"\n result = self.copy()\n result._id = self._id\n return result\n\n @doc(Index.__contains__)\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n self.get_loc(key)\n return True\n except (LookupError, TypeError, ValueError):\n return False\n\n @cache_readonly\n def dtype(self) -> np.dtype:\n return np.dtype(\"O\")\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\"return a boolean if we need a qualified .info display\"\"\"\n\n def f(level):\n return \"mixed\" in level or \"string\" in level or \"unicode\" in level\n\n return any(f(level) for level in self._inferred_type_levels)\n\n @doc(Index.memory_usage)\n def memory_usage(self, deep: bool = False) -> int:\n # we are overwriting our base class to avoid\n # computing .values here which could materialize\n # a tuple representation unnecessarily\n return self._nbytes(deep)\n\n @cache_readonly\n def nbytes(self) -> int:\n \"\"\"return the number of bytes in the underlying data\"\"\"\n return self._nbytes(False)\n\n def _nbytes(self, deep: bool = False) -> int:\n \"\"\"\n return the number of bytes in the underlying data\n deeply introspect the level data if deep=True\n\n include the engine hashtable\n\n *this is in internal routine*\n\n \"\"\"\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def _formatter_func(self, tup):\n \"\"\"\n Formats each item in tup according to its level's formatter function.\n \"\"\"\n formatter_funcs = [level._formatter_func for level in self.levels]\n return tuple(func(val) for func, val in zip(formatter_funcs, tup))\n\n def _format_native_types(self, na_rep=\"nan\", **kwargs):\n new_levels = []\n new_codes = []\n\n # go through the levels and format them\n for level, level_codes in zip(self.levels, self.codes):\n level_strs = level._format_native_types(na_rep=na_rep, **kwargs)\n # add nan values, if there are any\n mask = level_codes == -1\n if mask.any():\n nan_index = len(level_strs)\n # numpy 1.21 deprecated implicit string casting\n level_strs = level_strs.astype(str)\n level_strs = np.append(level_strs, na_rep)\n assert not level_codes.flags.writeable # i.e. 
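A small sketch of the membership and memory-introspection behaviour defined above; exact byte counts vary by platform and version, so only the relationship is checked:

import pandas as pd

mi = pd.MultiIndex.from_product([[1, 2], ["a", "b"]])
print((1, "a") in mi)   # True: __contains__ delegates to get_loc
print(mi.dtype)         # object
print(mi.memory_usage(deep=True) >= mi.nbytes)  # deep introspection can only add bytes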
copy is needed\n level_codes = level_codes.copy() # make writeable\n level_codes[mask] = nan_index\n new_levels.append(level_strs)\n new_codes.append(level_codes)\n\n if len(new_levels) == 1:\n # a single-level multi-index\n return Index(new_levels[0].take(new_codes[0]))._format_native_types()\n else:\n # reconstruct the multi-index\n mi = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n return mi._values\n\n def format(\n self,\n name: bool | None = None,\n formatter: Callable | None = None,\n na_rep: str | None = None,\n names: bool = False,\n space: int = 2,\n sparsify=None,\n adjoin: bool = True,\n ) -> list:\n if name is not None:\n names = name\n\n if len(self) == 0:\n return []\n\n stringified_levels = []\n for lev, level_codes in zip(self.levels, self.codes):\n na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)\n\n if len(lev) > 0:\n\n formatted = lev.take(level_codes).format(formatter=formatter)\n\n # we have some NA\n mask = level_codes == -1\n if mask.any():\n formatted = np.array(formatted, dtype=object)\n formatted[mask] = na\n formatted = formatted.tolist()\n\n else:\n # weird all NA case\n formatted = [\n pprint_thing(na if isna(x) else x, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n for x in algos.take_nd(lev._values, level_codes)\n ]\n stringified_levels.append(formatted)\n\n result_levels = []\n for lev, lev_name in zip(stringified_levels, self.names):\n level = []\n\n if names:\n level.append(\n pprint_thing(lev_name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if lev_name is not None\n else \"\"\n )\n\n level.extend(np.array(lev, dtype=object))\n result_levels.append(level)\n\n if sparsify is None:\n sparsify = get_option(\"display.multi_sparse\")\n\n if sparsify:\n sentinel = \"\"\n # GH3547 use value of sparsify as sentinel if it's \"Falsey\"\n assert isinstance(sparsify, bool) or sparsify is lib.no_default\n if sparsify in [False, lib.no_default]:\n sentinel = sparsify\n # little bit of a kludge job for #1217\n result_levels = sparsify_labels(\n result_levels, start=int(names), sentinel=sentinel\n )\n\n if adjoin:\n from pandas.io.formats.format import get_adjustment\n\n adj = get_adjustment()\n return adj.adjoin(space, *result_levels).split(\"\\n\")\n else:\n return result_levels\n\n # --------------------------------------------------------------------\n # Names Methods\n\n def _get_names(self) -> FrozenList:\n return FrozenList(self._names)\n\n def _set_names(self, names, *, level=None, validate: bool = True):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n validate : bool, default True\n validate that the names match level lengths\n\n Raises\n ------\n TypeError if each name is not hashable.\n\n Notes\n -----\n sets names on levels. 
WARNING: mutates!\n\n Note that you generally want to set this *after* changing levels, so\n that it only acts on copies\n \"\"\"\n # GH 15110\n # Don't allow a single string for names in a MultiIndex\n if names is not None and not is_list_like(names):\n raise ValueError(\"Names should be list-like for a MultiIndex\")\n names = list(names)\n\n if validate:\n if level is not None and len(names) != len(level):\n raise ValueError(\"Length of names must match length of level.\")\n if level is None and len(names) != self.nlevels:\n raise ValueError(\n \"Length of names must match number of levels in MultiIndex.\"\n )\n\n if level is None:\n level = range(self.nlevels)\n else:\n level = [self._get_level_number(lev) for lev in level]\n\n # set the name\n for lev, name in zip(level, names):\n if name is not None:\n # GH 20527\n # All items in 'names' need to be hashable:\n if not is_hashable(name):\n raise TypeError(\n f\"{type(self).__name__}.name must be a hashable type\"\n )\n # error: Cannot determine type of '__setitem__'\n self._names[lev] = name # type: ignore[has-type]\n\n # If .levels has been accessed, the names in our cache will be stale.\n self._reset_cache()\n\n names = property(\n fset=_set_names,\n fget=_get_names,\n doc=\"\"\"\n Names of levels in MultiIndex.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays(\n ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])\n >>> mi\n MultiIndex([(1, 3, 5),\n (2, 4, 6)],\n names=['x', 'y', 'z'])\n >>> mi.names\n FrozenList(['x', 'y', 'z'])\n \"\"\",\n )\n\n # --------------------------------------------------------------------\n\n @doc(Index._get_grouper_for_level)\n def _get_grouper_for_level(self, mapper, *, level):\n indexer = self.codes[level]\n level_index = self.levels[level]\n\n if mapper is not None:\n # Handle group mapping function and return\n level_values = self.levels[level].take(indexer)\n grouper = level_values.map(mapper)\n return grouper, None, None\n\n codes, uniques = algos.factorize(indexer, sort=True)\n\n if len(uniques) > 0 and uniques[0] == -1:\n # Handle NAs\n mask = indexer != -1\n ok_codes, uniques = algos.factorize(indexer[mask], sort=True)\n\n codes = np.empty(len(indexer), dtype=indexer.dtype)\n codes[mask] = ok_codes\n codes[~mask] = -1\n\n if len(uniques) < len(level_index):\n # Remove unobserved levels from level_index\n level_index = level_index.take(uniques)\n else:\n # break references back to us so that setting the name\n # on the output of a groupby doesn't reflect back here.\n level_index = level_index.copy()\n\n if level_index._can_hold_na:\n grouper = level_index.take(codes, fill_value=True)\n else:\n grouper = level_index.take(codes)\n\n return grouper, codes, level_index\n\n @cache_readonly\n def inferred_type(self) -> str:\n return \"mixed\"\n\n def _get_level_number(self, level) -> int:\n count = self.names.count(level)\n if (count > 1) and not is_integer(level):\n raise ValueError(\n f\"The name {level} occurs multiple times, use a level number\"\n )\n try:\n level = self.names.index(level)\n except ValueError as err:\n if not is_integer(level):\n raise KeyError(f\"Level {level} not found\") from err\n elif level < 0:\n level += self.nlevels\n if level < 0:\n orig_level = level - self.nlevels\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"{orig_level} is not a valid level number\"\n ) from err\n # Note: levels are zero-based\n elif level >= self.nlevels:\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"not 
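A short sketch of the `names` property documented above: assigning to it mutates the index in place and must be list-like, while `rename` is the non-mutating spelling:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]])
mi.names = ["x", "y", "z"]      # must be list-like; a bare string raises ValueError
print(mi.names)                 # FrozenList(['x', 'y', 'z'])

renamed = mi.rename(["a", "b", "c"])
print(renamed.names, mi.names)  # new names on the copy, original untouched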
{level + 1}\"\n ) from err\n return level\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n @cache_readonly\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n return if the index is monotonic increasing (only equal or\n increasing) values.\n \"\"\"\n if any(-1 in code for code in self.codes):\n return False\n\n if all(level.is_monotonic for level in self.levels):\n # If each level is sorted, we can operate on the codes directly. GH27495\n return libalgos.is_lexsorted(\n [x.astype(\"int64\", copy=False) for x in self.codes]\n )\n\n # reversed() because lexsort() wants the most significant key last.\n values = [\n self._get_level_values(i)._values for i in reversed(range(len(self.levels)))\n ]\n try:\n sort_order = np.lexsort(values)\n return Index(sort_order).is_monotonic\n except TypeError:\n\n # we have mixed types and np.lexsort is not happy\n return Index(self._values).is_monotonic\n\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n return if the index is monotonic decreasing (only equal or\n decreasing) values.\n \"\"\"\n # monotonic decreasing if and only if reverse is monotonic increasing\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def _inferred_type_levels(self) -> list[str]:\n \"\"\"return a list of the inferred types, one for each level\"\"\"\n return [i.inferred_type for i in self.levels]\n\n @doc(Index.duplicated)\n def duplicated(self, keep=\"first\") -> np.ndarray:\n shape = tuple(len(lev) for lev in self.levels)\n ids = get_group_index(self.codes, shape, sort=False, xnull=False)\n\n return duplicated(ids, keep)\n\n # error: Cannot override final attribute \"_duplicated\"\n # (previously declared in base class \"IndexOpsMixin\")\n _duplicated = duplicated # type: ignore[misc]\n\n def fillna(self, value=None, downcast=None):\n \"\"\"\n fillna is not implemented for MultiIndex\n \"\"\"\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n\n @doc(Index.dropna)\n def dropna(self, how: str = \"any\") -> MultiIndex:\n nans = [level_codes == -1 for level_codes in self.codes]\n if how == \"any\":\n indexer = np.any(nans, axis=0)\n elif how == \"all\":\n indexer = np.all(nans, axis=0)\n else:\n raise ValueError(f\"invalid how option: {how}\")\n\n new_codes = [level_codes[~indexer] for level_codes in self.codes]\n return self.set_codes(codes=new_codes)\n\n def _get_level_values(self, level: int, unique: bool = False) -> Index:\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index\n\n **this is an internal method**\n\n Parameters\n ----------\n level : int\n unique : bool, default False\n if True, drop duplicated values\n\n Returns\n -------\n Index\n \"\"\"\n lev = self.levels[level]\n level_codes = self.codes[level]\n name = self._names[level]\n if unique:\n level_codes = algos.unique(level_codes)\n filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)\n return lev._shallow_copy(filled, name=name)\n\n def get_level_values(self, level):\n \"\"\"\n Return vector of label values for requested level.\n\n Length of returned vector is equal to the length of the index.\n\n Parameters\n ----------\n level : int or str\n ``level`` is either the integer position of the level in the\n MultiIndex, or the name of the level.\n\n Returns\n -------\n values : Index\n Values is a level of this MultiIndex converted to\n a single :class:`Index` (or subclass thereof).\n\n 
Examples\n --------\n Create a MultiIndex:\n\n >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))\n >>> mi.names = ['level_1', 'level_2']\n\n Get level values by supplying level as either integer or name:\n\n >>> mi.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object', name='level_1')\n >>> mi.get_level_values('level_2')\n Index(['d', 'e', 'f'], dtype='object', name='level_2')\n \"\"\"\n level = self._get_level_number(level)\n values = self._get_level_values(level)\n return values\n\n @doc(Index.unique)\n def unique(self, level=None):\n\n if level is None:\n return super().unique()\n else:\n level = self._get_level_number(level)\n return self._get_level_values(level=level, unique=True)\n\n def to_frame(self, index: bool = True, name=None) -> DataFrame:\n \"\"\"\n Create a DataFrame with the levels of the MultiIndex as columns.\n\n Column ordering is determined by the DataFrame constructor with data as\n a dict.\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original MultiIndex.\n\n name : list / sequence of str, optional\n The passed names should substitute index level names.\n\n Returns\n -------\n DataFrame : a DataFrame containing the original MultiIndex data.\n\n See Also\n --------\n DataFrame : Two-dimensional, size-mutable, potentially heterogeneous\n tabular data.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])\n >>> mi\n MultiIndex([('a', 'c'),\n ('b', 'd')],\n )\n\n >>> df = mi.to_frame()\n >>> df\n 0 1\n a c a c\n b d b d\n\n >>> df = mi.to_frame(index=False)\n >>> df\n 0 1\n 0 a c\n 1 b d\n\n >>> df = mi.to_frame(name=['x', 'y'])\n >>> df\n x y\n a c a c\n b d b d\n \"\"\"\n from pandas import DataFrame\n\n if name is not None:\n if not is_list_like(name):\n raise TypeError(\"'name' must be a list / sequence of column names.\")\n\n if len(name) != len(self.levels):\n raise ValueError(\n \"'name' should have same length as number of levels on index.\"\n )\n idx_names = name\n else:\n idx_names = self.names\n\n # Guarantee resulting column order - PY36+ dict maintains insertion order\n result = DataFrame(\n {\n (level if lvlname is None else lvlname): self._get_level_values(level)\n for lvlname, level in zip(idx_names, range(len(self.levels)))\n },\n copy=False,\n )\n\n if index:\n result.index = self\n return result\n\n def to_flat_index(self) -> Index:\n \"\"\"\n Convert a MultiIndex to an Index of Tuples containing the level values.\n\n Returns\n -------\n pd.Index\n Index with the MultiIndex data represented in Tuples.\n\n See Also\n --------\n MultiIndex.from_tuples : Convert flat index back to MultiIndex.\n\n Notes\n -----\n This method will simply return the caller if called by anything other\n than a MultiIndex.\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_product(\n ... [['foo', 'bar'], ['baz', 'qux']],\n ... 
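A usage sketch of `unique(level)` and `to_frame` as documented above:

import pandas as pd

mi = pd.MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 2]], names=["key", "val"])
print(mi.unique("key"))  # Index(['a', 'b'], dtype='object', name='key')

df = mi.to_frame(index=False, name=["k", "v"])
print(df)
#    k  v
# 0  a  1
# 1  a  2
# 2  b  2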
names=['a', 'b'])\n >>> index.to_flat_index()\n Index([('foo', 'baz'), ('foo', 'qux'),\n ('bar', 'baz'), ('bar', 'qux')],\n dtype='object')\n \"\"\"\n return Index(self._values, tupleize_cols=False)\n\n @property\n def _is_all_dates(self) -> bool:\n return False\n\n def is_lexsorted(self) -> bool:\n warnings.warn(\n \"MultiIndex.is_lexsorted is deprecated as a public function, \"\n \"users should use MultiIndex.is_monotonic_increasing instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._is_lexsorted()\n\n def _is_lexsorted(self) -> bool:\n \"\"\"\n Return True if the codes are lexicographically sorted.\n\n Returns\n -------\n bool\n\n Examples\n --------\n In the below examples, the first level of the MultiIndex is sorted because\n a<b<c, so there is no need to look at the next level.\n\n >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()\n True\n\n In case there is a tie, the lexicographical sorting looks\n at the next level of the MultiIndex.\n\n >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()\n False\n >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],\n ... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],\n ... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()\n False\n \"\"\"\n return self._lexsort_depth == self.nlevels\n\n @property\n def lexsort_depth(self):\n warnings.warn(\n \"MultiIndex.is_lexsorted is deprecated as a public function, \"\n \"users should use MultiIndex.is_monotonic_increasing instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._lexsort_depth\n\n @cache_readonly\n def _lexsort_depth(self) -> int:\n \"\"\"\n Compute and return the lexsort_depth, the number of levels of the\n MultiIndex that are sorted lexically\n\n Returns\n -------\n int\n \"\"\"\n if self.sortorder is not None:\n return self.sortorder\n return _lexsort_depth(self.codes, self.nlevels)\n\n def _sort_levels_monotonic(self) -> MultiIndex:\n \"\"\"\n This is an *internal* function.\n\n Create a new MultiIndex from the current to monotonically sorted\n items IN the levels. This does not actually make the entire MultiIndex\n monotonic, JUST the levels.\n\n The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will also\n be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... 
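A short round-trip sketch for `to_flat_index`: the result is a plain Index of tuples, and `from_tuples` reconstructs an equal MultiIndex:

import pandas as pd

mi = pd.MultiIndex.from_product([["foo", "bar"], ["baz", "qux"]], names=["a", "b"])
flat = mi.to_flat_index()
print(type(flat).__name__)  # Index
print(pd.MultiIndex.from_tuples(flat, names=mi.names).equals(mi))  # True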
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n\n >>> mi.sort_values()\n MultiIndex([('a', 'aa'),\n ('a', 'bb'),\n ('b', 'aa'),\n ('b', 'bb')],\n )\n \"\"\"\n if self._is_lexsorted() and self.is_monotonic:\n return self\n\n new_levels = []\n new_codes = []\n\n for lev, level_codes in zip(self.levels, self.codes):\n\n if not lev.is_monotonic:\n try:\n # indexer to reorder the levels\n indexer = lev.argsort()\n except TypeError:\n pass\n else:\n lev = lev.take(indexer)\n\n # indexer to reorder the level codes\n indexer = ensure_platform_int(indexer)\n ri = lib.get_reverse_indexer(indexer, len(indexer))\n level_codes = algos.take_nd(ri, level_codes)\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n return MultiIndex(\n new_levels,\n new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def remove_unused_levels(self) -> MultiIndex:\n \"\"\"\n Create new MultiIndex from current that removes unused levels.\n\n Unused level(s) means levels that are not expressed in the\n labels. The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will\n also be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_product([range(2), list('ab')])\n >>> mi\n MultiIndex([(0, 'a'),\n (0, 'b'),\n (1, 'a'),\n (1, 'b')],\n )\n\n >>> mi[2:]\n MultiIndex([(1, 'a'),\n (1, 'b')],\n )\n\n The 0 from the first level is not represented\n and can be removed\n\n >>> mi2 = mi[2:].remove_unused_levels()\n >>> mi2.levels\n FrozenList([[1], ['a', 'b']])\n \"\"\"\n new_levels = []\n new_codes = []\n\n changed = False\n for lev, level_codes in zip(self.levels, self.codes):\n\n # Since few levels are typically unused, bincount() is more\n # efficient than unique() - however it only accepts positive values\n # (and drops order):\n uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1\n has_na = int(len(uniques) and (uniques[0] == -1))\n\n if len(uniques) != len(lev) + has_na:\n\n if lev.isna().any() and len(uniques) == len(lev):\n break\n # We have unused levels\n changed = True\n\n # Recalculate uniques, now preserving order.\n # Can easily be cythonized by exploiting the already existing\n # \"uniques\" and stop parsing \"level_codes\" when all items\n # are found:\n uniques = algos.unique(level_codes)\n if has_na:\n na_idx = np.where(uniques == -1)[0]\n # Just ensure that -1 is in first position:\n uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]\n\n # codes get mapped from uniques to 0:len(uniques)\n # -1 (if present) is mapped to last position\n code_mapping = np.zeros(len(lev) + has_na)\n # ... 
and reassigned value -1:\n code_mapping[uniques] = np.arange(len(uniques)) - has_na\n\n level_codes = code_mapping[level_codes]\n\n # new levels are simple\n lev = lev.take(uniques[has_na:])\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n result = self.view()\n\n if changed:\n result._reset_identity()\n result._set_levels(new_levels, validate=False)\n result._set_codes(new_codes, validate=False)\n\n return result\n\n # --------------------------------------------------------------------\n # Pickling Methods\n\n def __reduce__(self):\n \"\"\"Necessary for making this object picklable\"\"\"\n d = {\n \"levels\": list(self.levels),\n \"codes\": list(self.codes),\n \"sortorder\": self.sortorder,\n \"names\": list(self.names),\n }\n return ibase._new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n\n def __getitem__(self, key):\n if is_scalar(key):\n key = com.cast_scalar_indexer(key, warn_float=True)\n\n retval = []\n for lev, level_codes in zip(self.levels, self.codes):\n if level_codes[key] == -1:\n retval.append(np.nan)\n else:\n retval.append(lev[level_codes[key]])\n\n return tuple(retval)\n else:\n # in general cannot be sure whether the result will be sorted\n sortorder = None\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n sortorder = self.sortorder\n elif isinstance(key, slice):\n if key.step is None or key.step > 0:\n sortorder = self.sortorder\n elif isinstance(key, Index):\n key = np.asarray(key)\n\n new_codes = [level_codes[key] for level_codes in self.codes]\n\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:\n \"\"\"\n Fastpath for __getitem__ when we know we have a slice.\n \"\"\"\n sortorder = None\n if slobj.step is None or slobj.step > 0:\n sortorder = self.sortorder\n\n new_codes = [level_codes[slobj] for level_codes in self.codes]\n\n return type(self)(\n levels=self.levels,\n codes=new_codes,\n names=self._names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(\n self: MultiIndex,\n indices,\n axis: int = 0,\n allow_fill: bool = True,\n fill_value=None,\n **kwargs,\n ) -> MultiIndex:\n nv.validate_take((), kwargs)\n indices = ensure_platform_int(indices)\n\n # only fill if we are passing a non-None fill_value\n allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)\n\n na_value = -1\n\n taken = [lab.take(indices) for lab in self.codes]\n if allow_fill:\n mask = indices == -1\n if mask.any():\n masked = []\n for new_label in taken:\n label_values = new_label\n label_values[mask] = na_value\n masked.append(np.asarray(label_values))\n taken = masked\n\n return MultiIndex(\n levels=self.levels, codes=taken, names=self.names, verify_integrity=False\n )\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n if not isinstance(other, (list, tuple)):\n other = [other]\n\n if all(\n (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other\n ):\n arrays = []\n for i in range(self.nlevels):\n label = self._get_level_values(i)\n appended = [o._get_level_values(i) for o in other]\n arrays.append(label.append(appended))\n return MultiIndex.from_arrays(arrays, names=self.names)\n\n 
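A sketch of the interplay between slicing and `remove_unused_levels` shown above: slicing keeps the full level arrays, and the unused labels only disappear after the explicit call:

import pandas as pd

mi = pd.MultiIndex.from_product([range(3), list("ab")])
sub = mi[2:]                                 # plain __getitem__ with a slice
print(sub.levels[0])                         # 0, 1 and 2 are all still present
print(sub.remove_unused_levels().levels[0])  # only 1 and 2 remain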
to_concat = (self._values,) + tuple(k._values for k in other)\n new_tuples = np.concatenate(to_concat)\n\n # if all(isinstance(x, MultiIndex) for x in other):\n try:\n return MultiIndex.from_tuples(new_tuples, names=self.names)\n except (TypeError, IndexError):\n return Index(new_tuples)\n\n def argsort(self, *args, **kwargs) -> np.ndarray:\n return self._values.argsort(*args, **kwargs)\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats: int, axis=None) -> MultiIndex:\n nv.validate_repeat((), {\"axis\": axis})\n # error: Incompatible types in assignment (expression has type \"ndarray\",\n # variable has type \"int\")\n repeats = ensure_platform_int(repeats) # type: ignore[assignment]\n return MultiIndex(\n levels=self.levels,\n codes=[\n level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)\n for level_codes in self.codes\n ],\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def drop(self, codes, level=None, errors=\"raise\"):\n \"\"\"\n Make new MultiIndex with passed list of codes deleted\n\n Parameters\n ----------\n codes : array-like\n Must be a list of tuples when level is not specified\n level : int or level name, default None\n errors : str, default 'raise'\n\n Returns\n -------\n dropped : MultiIndex\n \"\"\"\n if level is not None:\n return self._drop_from_level(codes, level, errors)\n\n if not isinstance(codes, (np.ndarray, Index)):\n try:\n codes = com.index_labels_to_array(codes, dtype=np.dtype(\"object\"))\n except ValueError:\n pass\n\n inds = []\n for level_codes in codes:\n try:\n loc = self.get_loc(level_codes)\n # get_loc returns either an integer, a slice, or a boolean\n # mask\n if isinstance(loc, int):\n inds.append(loc)\n elif isinstance(loc, slice):\n step = loc.step if loc.step is not None else 1\n inds.extend(range(loc.start, loc.stop, step))\n elif com.is_bool_indexer(loc):\n if self._lexsort_depth == 0:\n warnings.warn(\n \"dropping on a non-lexsorted multi-index \"\n \"without a level parameter may impact performance.\",\n PerformanceWarning,\n stacklevel=3,\n )\n loc = loc.nonzero()[0]\n inds.extend(loc)\n else:\n msg = f\"unsupported indexer of type {type(loc)}\"\n raise AssertionError(msg)\n except KeyError:\n if errors != \"ignore\":\n raise\n\n return self.delete(inds)\n\n def _drop_from_level(self, codes, level, errors=\"raise\") -> MultiIndex:\n codes = com.index_labels_to_array(codes)\n i = self._get_level_number(level)\n index = self.levels[i]\n values = index.get_indexer(codes)\n # If nan should be dropped it will equal -1 here. We have to check which values\n # are not nan and equal -1, this means they are missing in the index\n nan_codes = isna(codes)\n values[(np.equal(nan_codes, False)) & (values == -1)] = -2\n if index.shape[0] == self.shape[0]:\n values[np.equal(nan_codes, True)] = -2\n\n not_found = codes[values == -2]\n if len(not_found) != 0 and errors != \"ignore\":\n raise KeyError(f\"labels {not_found} not found in level\")\n mask = ~algos.isin(self.codes[i], values)\n\n return self[mask]\n\n def swaplevel(self, i=-2, j=-1) -> MultiIndex:\n \"\"\"\n Swap level i with level j.\n\n Calling this method does not change the ordering of the values.\n\n Parameters\n ----------\n i : int, str, default -2\n First level of index to be swapped. Can pass level name as string.\n Type of parameters can be mixed.\n j : int, str, default -1\n Second level of index to be swapped. 
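A small sketch of `drop` with a `level` argument and of `repeat`, both documented above:

import pandas as pd

mi = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=["num", "let"])
print(mi.drop("a", level="let"))  # only the (*, 'b') entries remain
print(mi.repeat(2))               # every entry repeated twice; the levels are shared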
Can pass level name as string.\n Type of parameters can be mixed.\n\n Returns\n -------\n MultiIndex\n A new MultiIndex.\n\n See Also\n --------\n Series.swaplevel : Swap levels i and j in a MultiIndex.\n Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a\n particular axis.\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n >>> mi.swaplevel(0, 1)\n MultiIndex([('bb', 'a'),\n ('aa', 'a'),\n ('bb', 'b'),\n ('aa', 'b')],\n )\n \"\"\"\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n i = self._get_level_number(i)\n j = self._get_level_number(j)\n\n new_levels[i], new_levels[j] = new_levels[j], new_levels[i]\n new_codes[i], new_codes[j] = new_codes[j], new_codes[i]\n new_names[i], new_names[j] = new_names[j], new_names[i]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def reorder_levels(self, order) -> MultiIndex:\n \"\"\"\n Rearrange levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. Reference level by number\n (position) or by key (label).\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])\n >>> mi\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.reorder_levels(order=[1, 0])\n MultiIndex([(3, 1),\n (4, 2)],\n names=['y', 'x'])\n\n >>> mi.reorder_levels(order=['y', 'x'])\n MultiIndex([(3, 1),\n (4, 2)],\n names=['y', 'x'])\n \"\"\"\n order = [self._get_level_number(i) for i in order]\n if len(order) != self.nlevels:\n raise AssertionError(\n f\"Length of order must be same as number of levels ({self.nlevels}), \"\n f\"got {len(order)}\"\n )\n new_levels = [self.levels[i] for i in order]\n new_codes = [self.codes[i] for i in order]\n new_names = [self.names[i] for i in order]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def _get_codes_for_sorting(self) -> list[Categorical]:\n \"\"\"\n we are categorizing our codes by using the\n available categories (all, not just observed)\n excluding any missing ones (-1); this is in preparation\n for sorting, where we need to disambiguate that -1 is not\n a valid valid\n \"\"\"\n\n def cats(level_codes):\n return np.arange(\n np.array(level_codes).max() + 1 if len(level_codes) else 0,\n dtype=level_codes.dtype,\n )\n\n return [\n Categorical.from_codes(level_codes, cats(level_codes), ordered=True)\n for level_codes in self.codes\n ]\n\n def sortlevel(\n self, level=0, ascending: bool = True, sort_remaining: bool = True\n ) -> tuple[MultiIndex, np.ndarray]:\n \"\"\"\n Sort MultiIndex at the requested level.\n\n The result will respect the original ordering of the associated\n factor at that level.\n\n Parameters\n ----------\n level : list-like, int or str, default 0\n If a string is given, must be a name of the level.\n If list-like must be names or ints of levels.\n ascending : bool, default True\n False to sort in descending order.\n Can also be a list to specify a directed ordering.\n sort_remaining : sort by the remaining levels after level\n\n Returns\n -------\n sorted_index : pd.MultiIndex\n Resulting index.\n indexer : np.ndarray\n Indices of output values in original index.\n\n Examples\n --------\n >>> 
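A usage sketch of `swaplevel` and `reorder_levels` with level names, as documented above:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=["x", "y", "z"])
print(mi.swaplevel("x", "z").names)        # FrozenList(['z', 'y', 'x'])
print(mi.reorder_levels(["z", "x", "y"]))  # levels are permuted, values are not re-sorted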
mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])\n >>> mi\n MultiIndex([(0, 2),\n (0, 1)],\n )\n\n >>> mi.sortlevel()\n (MultiIndex([(0, 1),\n (0, 2)],\n ), array([1, 0]))\n\n >>> mi.sortlevel(sort_remaining=False)\n (MultiIndex([(0, 2),\n (0, 1)],\n ), array([0, 1]))\n\n >>> mi.sortlevel(1)\n (MultiIndex([(0, 1),\n (0, 2)],\n ), array([1, 0]))\n\n >>> mi.sortlevel(1, ascending=False)\n (MultiIndex([(0, 2),\n (0, 1)],\n ), array([0, 1]))\n \"\"\"\n if isinstance(level, (str, int)):\n level = [level]\n level = [self._get_level_number(lev) for lev in level]\n sortorder = None\n\n # we have a directed ordering via ascending\n if isinstance(ascending, list):\n if not len(level) == len(ascending):\n raise ValueError(\"level must have same length as ascending\")\n\n indexer = lexsort_indexer(\n [self.codes[lev] for lev in level], orders=ascending\n )\n\n # level ordering\n else:\n\n codes = list(self.codes)\n shape = list(self.levshape)\n\n # partition codes and shape\n primary = tuple(codes[lev] for lev in level)\n primshp = tuple(shape[lev] for lev in level)\n\n # Reverse sorted to retain the order of\n # smaller indices that needs to be removed\n for lev in sorted(level, reverse=True):\n codes.pop(lev)\n shape.pop(lev)\n\n if sort_remaining:\n primary += primary + tuple(codes)\n primshp += primshp + tuple(shape)\n else:\n sortorder = level[0]\n\n indexer = indexer_from_factorized(primary, primshp, compress=False)\n\n if not ascending:\n indexer = indexer[::-1]\n\n indexer = ensure_platform_int(indexer)\n new_codes = [level_codes.take(indexer) for level_codes in self.codes]\n\n new_index = MultiIndex(\n codes=new_codes,\n levels=self.levels,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n return new_index, indexer\n\n def _wrap_reindex_result(self, target, indexer, preserve_names: bool):\n if not isinstance(target, MultiIndex):\n if indexer is None:\n target = self\n elif (indexer >= 0).all():\n target = self.take(indexer)\n else:\n try:\n target = MultiIndex.from_tuples(target)\n except TypeError:\n # not all tuples, see test_constructor_dict_multiindex_reindex_flat\n return target\n\n target = self._maybe_preserve_names(target, preserve_names)\n return target\n\n def _maybe_preserve_names(self, target: Index, preserve_names: bool):\n if (\n preserve_names\n and target.nlevels == self.nlevels\n and target.names != self.names\n ):\n target = target.copy(deep=False)\n target.names = self.names\n return target\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _check_indexing_error(self, key):\n if not is_hashable(key) or is_iterator(key):\n # We allow tuples if they are hashable, whereas other Index\n # subclasses require scalar.\n # We have to explicitly exclude generators, as these are hashable.\n raise InvalidIndexError(key)\n\n @cache_readonly\n def _should_fallback_to_positional(self) -> bool:\n \"\"\"\n Should integer key(s) be treated as positional?\n \"\"\"\n # GH#33355\n return self.levels[0]._should_fallback_to_positional\n\n def _get_values_for_loc(self, series: Series, loc, key):\n \"\"\"\n Do a positional lookup on the given Series, returning either a scalar\n or a Series.\n\n Assumes that `series.index is self`\n \"\"\"\n new_values = series._values[loc]\n if is_scalar(loc):\n return new_values\n\n if len(new_values) == 1 and not self.nlevels > 1:\n # If more than one level left, we can not return a scalar\n return new_values[0]\n\n new_index = self[loc]\n new_index = maybe_droplevels(new_index, 
key)\n new_ser = series._constructor(new_values, index=new_index, name=series.name)\n return new_ser.__finalize__(series)\n\n def _get_indexer_strict(self, key, axis_name: str) -> tuple[Index, np.ndarray]:\n\n keyarr = key\n if not isinstance(keyarr, Index):\n keyarr = com.asarray_tuplesafe(keyarr)\n\n if len(keyarr) and not isinstance(keyarr[0], tuple):\n indexer = self._get_indexer_level_0(keyarr)\n\n self._raise_if_missing(key, indexer, axis_name)\n return self[indexer], indexer\n\n return super()._get_indexer_strict(key, axis_name)\n\n def _raise_if_missing(self, key, indexer, axis_name: str):\n keyarr = key\n if not isinstance(key, Index):\n keyarr = com.asarray_tuplesafe(key)\n\n if len(keyarr) and not isinstance(keyarr[0], tuple):\n # i.e. same condition for special case in MultiIndex._get_indexer_strict\n\n mask = indexer == -1\n if mask.any():\n check = self.levels[0].get_indexer(keyarr)\n cmask = check == -1\n if cmask.any():\n raise KeyError(f\"{keyarr[cmask]} not in index\")\n # We get here when levels still contain values which are not\n # actually in Index anymore\n raise KeyError(f\"{keyarr} not in index\")\n else:\n return super()._raise_if_missing(key, indexer, axis_name)\n\n def _get_indexer_level_0(self, target) -> np.ndarray:\n \"\"\"\n Optimized equivalent to `self.get_level_values(0).get_indexer_for(target)`.\n \"\"\"\n lev = self.levels[0]\n codes = self._codes[0]\n cat = Categorical.from_codes(codes=codes, categories=lev)\n ci = Index(cat)\n return ci.get_indexer_for(target)\n\n def get_slice_bound(\n self, label: Hashable | Sequence[Hashable], side: str, kind: str | None = None\n ) -> int:\n \"\"\"\n For an ordered MultiIndex, compute slice bound\n that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if `side=='right') position\n of given label.\n\n Parameters\n ----------\n label : object or tuple of objects\n side : {'left', 'right'}\n kind : {'loc', 'getitem', None}\n\n Returns\n -------\n int\n Index of label.\n\n Notes\n -----\n This method only works if level 0 index of the MultiIndex is lexsorted.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])\n\n Get the locations from the leftmost 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.get_slice_bound('b', side=\"left\")\n 1\n\n Like above, but if you get the locations from the rightmost\n 'b' in the first level and 'f' in the second level:\n\n >>> mi.get_slice_bound(('b','f'), side=\"right\")\n 3\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n if not isinstance(label, tuple):\n label = (label,)\n return self._partial_tup_index(label, side=side)\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered MultiIndex, compute the slice locations for input\n labels.\n\n The input labels can be tuples representing partial levels, e.g. for a\n MultiIndex with 3 levels, you can pass a single value (corresponding to\n the first level), or a 1-, 2-, or 3-tuple.\n\n Parameters\n ----------\n start : label or tuple, default None\n If None, defaults to the beginning\n end : label or tuple\n If None, defaults to the end\n step : int or None\n Slice step\n kind : string, optional, defaults None\n\n Returns\n -------\n (start, end) : (int, int)\n\n Notes\n -----\n This method only works if the MultiIndex is properly lexsorted. 
So,\n if only the first 2 levels of a 3-level MultiIndex are lexsorted,\n you can only pass two levels to ``.slice_locs``.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],\n ... names=['A', 'B'])\n\n Get the slice locations from the beginning of 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.slice_locs(start='b')\n (1, 4)\n\n Like above, but stop at the end of 'b' in the first level and 'f' in\n the second level:\n\n >>> mi.slice_locs(start='b', end=('b', 'f'))\n (1, 3)\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n # This function adds nothing to its parent implementation (the magic\n # happens in get_slice_bound method), but it adds meaningful doc.\n return super().slice_locs(start, end, step)\n\n def _partial_tup_index(self, tup: tuple, side=\"left\"):\n if len(tup) > self._lexsort_depth:\n raise UnsortedIndexError(\n f\"Key length ({len(tup)}) was greater than MultiIndex lexsort depth \"\n f\"({self._lexsort_depth})\"\n )\n\n n = len(tup)\n start, end = 0, len(self)\n zipped = zip(tup, self.levels, self.codes)\n for k, (lab, lev, level_codes) in enumerate(zipped):\n section = level_codes[start:end]\n\n if lab not in lev and not isna(lab):\n # short circuit\n try:\n loc = lev.searchsorted(lab, side=side)\n except TypeError as err:\n # non-comparable e.g. test_slice_locs_with_type_mismatch\n raise TypeError(f\"Level type mismatch: {lab}\") from err\n if not is_integer(loc):\n # non-comparable level, e.g. test_groupby_example\n raise TypeError(f\"Level type mismatch: {lab}\")\n if side == \"right\" and loc >= 0:\n loc -= 1\n return start + section.searchsorted(loc, side=side)\n\n idx = self._get_loc_single_level_index(lev, lab)\n if isinstance(idx, slice) and k < n - 1:\n # Get start and end value from slice, necessary when a non-integer\n # interval is given as input GH#37707\n start = idx.start\n end = idx.stop\n elif k < n - 1:\n end = start + section.searchsorted(idx, side=\"right\")\n start = start + section.searchsorted(idx, side=\"left\")\n elif isinstance(idx, slice):\n idx = idx.start\n return start + section.searchsorted(idx, side=side)\n else:\n return start + section.searchsorted(idx, side=side)\n\n def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:\n \"\"\"\n If key is NA value, location of index unify as -1.\n\n Parameters\n ----------\n level_index: Index\n key : label\n\n Returns\n -------\n loc : int\n If key is NA value, loc is -1\n Else, location of key in index.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n \"\"\"\n if is_scalar(key) and isna(key):\n return -1\n else:\n return level_index.get_loc(key)\n\n def get_loc(self, key, method=None):\n \"\"\"\n Get location for a label or a tuple of labels.\n\n The location is returned as an integer/slice or boolean\n mask.\n\n Parameters\n ----------\n key : label or tuple of labels (one for each level)\n method : None\n\n Returns\n -------\n loc : int, slice object or boolean mask\n If the key is past the lexsort depth, the return may be a\n boolean mask array, otherwise it is always a slice or int.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n 
sequence of such.\n\n Notes\n -----\n The key cannot be a slice, list of same-level labels, a boolean mask,\n or a sequence of such. If you want to use those, use\n :meth:`MultiIndex.get_locs` instead.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_loc('b')\n slice(1, 3, None)\n\n >>> mi.get_loc(('b', 'e'))\n 1\n \"\"\"\n if method is not None:\n raise NotImplementedError(\n \"only the default get_loc method is \"\n \"currently supported for MultiIndex\"\n )\n\n hash(key)\n\n def _maybe_to_slice(loc):\n \"\"\"convert integer indexer to boolean mask or slice if possible\"\"\"\n if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:\n return loc\n\n loc = lib.maybe_indices_to_slice(loc, len(self))\n if isinstance(loc, slice):\n return loc\n\n mask = np.empty(len(self), dtype=\"bool\")\n mask.fill(False)\n mask[loc] = True\n return mask\n\n if not isinstance(key, tuple):\n loc = self._get_level_indexer(key, level=0)\n return _maybe_to_slice(loc)\n\n keylen = len(key)\n if self.nlevels < keylen:\n raise KeyError(\n f\"Key length ({keylen}) exceeds index depth ({self.nlevels})\"\n )\n\n if keylen == self.nlevels and self.is_unique:\n try:\n return self._engine.get_loc(key)\n except TypeError:\n # e.g. partial string slicing\n loc, _ = self.get_loc_level(key, list(range(self.nlevels)))\n return loc\n\n # -- partial selection or non-unique index\n # break the key into 2 parts based on the lexsort_depth of the index;\n # the first part returns a continuous slice of the index; the 2nd part\n # needs linear search within the slice\n i = self._lexsort_depth\n lead_key, follow_key = key[:i], key[i:]\n\n if not lead_key:\n start = 0\n stop = len(self)\n else:\n try:\n start, stop = self.slice_locs(lead_key, lead_key)\n except TypeError as err:\n # e.g. test_groupby_example key = ((0, 0, 1, 2), \"new_col\")\n # when self has 5 integer levels\n raise KeyError(key) from err\n\n if start == stop:\n raise KeyError(key)\n\n if not follow_key:\n return slice(start, stop)\n\n warnings.warn(\n \"indexing past lexsort depth may impact performance.\",\n PerformanceWarning,\n stacklevel=10,\n )\n\n loc = np.arange(start, stop, dtype=np.intp)\n\n for i, k in enumerate(follow_key, len(lead_key)):\n mask = self.codes[i][loc] == self._get_loc_single_level_index(\n self.levels[i], k\n )\n if not mask.all():\n loc = loc[mask]\n if not len(loc):\n raise KeyError(key)\n\n return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)\n\n def get_loc_level(self, key, level=0, drop_level: bool = True):\n \"\"\"\n Get location and sliced index for requested label(s)/level(s).\n\n Parameters\n ----------\n key : label or sequence of labels\n level : int/level name or list thereof, optional\n drop_level : bool, default True\n If ``False``, the resulting index will not drop any level.\n\n Returns\n -------\n loc : A 2-tuple where the elements are:\n Element 0: int, slice object or boolean array\n Element 1: The resulting sliced multiindex/index. If the key\n contains all levels, this will be ``None``.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],\n ... 
names=['A', 'B'])\n\n >>> mi.get_loc_level('b')\n (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))\n\n >>> mi.get_loc_level('e', level='B')\n (array([False, True, False]), Index(['b'], dtype='object', name='A'))\n\n >>> mi.get_loc_level(['b', 'e'])\n (1, None)\n \"\"\"\n if not isinstance(level, (list, tuple)):\n level = self._get_level_number(level)\n else:\n level = [self._get_level_number(lev) for lev in level]\n\n loc, mi = self._get_loc_level(key, level=level)\n if not drop_level:\n if lib.is_integer(loc):\n mi = self[loc : loc + 1]\n else:\n mi = self[loc]\n return loc, mi\n\n def _get_loc_level(self, key, level: int | list[int] = 0):\n \"\"\"\n get_loc_level but with `level` known to be positional, not name-based.\n \"\"\"\n\n # different name to distinguish from maybe_droplevels\n def maybe_mi_droplevels(indexer, levels):\n new_index = self[indexer]\n\n for i in sorted(levels, reverse=True):\n new_index = new_index._drop_level_numbers([i])\n\n return new_index\n\n if isinstance(level, (tuple, list)):\n if len(key) != len(level):\n raise AssertionError(\n \"Key for location must have same length as number of levels\"\n )\n result = None\n for lev, k in zip(level, key):\n loc, new_index = self._get_loc_level(k, level=lev)\n if isinstance(loc, slice):\n mask = np.zeros(len(self), dtype=bool)\n mask[loc] = True\n loc = mask\n result = loc if result is None else result & loc\n\n try:\n # FIXME: we should be only dropping levels on which we are\n # scalar-indexing\n mi = maybe_mi_droplevels(result, level)\n except ValueError:\n # droplevel failed because we tried to drop all levels,\n # i.e. len(level) == self.nlevels\n mi = self[result]\n\n return result, mi\n\n # kludge for #1796\n if isinstance(key, list):\n key = tuple(key)\n\n if isinstance(key, tuple) and level == 0:\n\n try:\n # Check if this tuple is a single key in our first level\n if key in self.levels[0]:\n indexer = self._get_level_indexer(key, level=level)\n new_index = maybe_mi_droplevels(indexer, [0])\n return indexer, new_index\n except (TypeError, InvalidIndexError):\n pass\n\n if not any(isinstance(k, slice) for k in key):\n\n if len(key) == self.nlevels and self.is_unique:\n # Complete key in unique index -> standard get_loc\n try:\n return (self._engine.get_loc(key), None)\n except KeyError as err:\n raise KeyError(key) from err\n except TypeError:\n # e.g. partial string indexing\n # test_partial_string_timestamp_multiindex\n pass\n\n # partial selection\n indexer = self.get_loc(key)\n ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]\n if len(ilevels) == self.nlevels:\n if is_integer(indexer):\n # we are dropping all levels\n return indexer, None\n\n # TODO: in some cases we still need to drop some levels,\n # e.g. test_multiindex_perf_warn\n # test_partial_string_timestamp_multiindex\n ilevels = [\n i\n for i in range(len(key))\n if (\n not isinstance(key[i], str)\n or not self.levels[i]._supports_partial_string_indexing\n )\n and key[i] != slice(None, None)\n ]\n if len(ilevels) == self.nlevels:\n # TODO: why?\n ilevels = []\n return indexer, maybe_mi_droplevels(indexer, ilevels)\n\n else:\n indexer = None\n for i, k in enumerate(key):\n if not isinstance(k, slice):\n loc_level = self._get_level_indexer(k, level=i)\n if isinstance(loc_level, slice):\n if com.is_null_slice(loc_level) or com.is_full_slice(\n loc_level, len(self)\n ):\n # everything\n continue\n else:\n # e.g. 
test_xs_IndexSlice_argument_not_implemented\n k_index = np.zeros(len(self), dtype=bool)\n k_index[loc_level] = True\n\n else:\n k_index = loc_level\n\n elif com.is_null_slice(k):\n # taking everything, does not affect `indexer` below\n continue\n\n else:\n # FIXME: this message can be inaccurate, e.g.\n # test_series_varied_multiindex_alignment\n raise TypeError(f\"Expected label or tuple of labels, got {key}\")\n\n if indexer is None:\n indexer = k_index\n else:\n indexer &= k_index\n if indexer is None:\n indexer = slice(None, None)\n ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]\n return indexer, maybe_mi_droplevels(indexer, ilevels)\n else:\n indexer = self._get_level_indexer(key, level=level)\n if (\n isinstance(key, str)\n and self.levels[level]._supports_partial_string_indexing\n ):\n # check to see if we did an exact lookup vs sliced\n check = self.levels[level].get_loc(key)\n if not is_integer(check):\n # e.g. test_partial_string_timestamp_multiindex\n return indexer, self[indexer]\n\n return indexer, maybe_mi_droplevels(indexer, [level])\n\n def _get_level_indexer(\n self, key, level: int = 0, indexer: Int64Index | None = None\n ):\n # `level` kwarg is _always_ positional, never name\n # return an indexer, boolean array or a slice showing where the key is\n # in the totality of values\n # if the indexer is provided, then use this\n\n level_index = self.levels[level]\n level_codes = self.codes[level]\n\n def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):\n # given the inputs and the codes/indexer, compute an indexer set\n # if we have a provided indexer, then this need not consider\n # the entire labels set\n if step is not None and step < 0:\n # Switch elements for negative step size\n start, stop = stop - 1, start - 1\n r = np.arange(start, stop, step)\n\n if indexer is not None and len(indexer) != len(codes):\n\n # we have an indexer which maps the locations in the labels\n # that we have already selected (and is not an indexer for the\n # entire set) otherwise this is wasteful so we only need to\n # examine locations that are in this set the only magic here is\n # that the result are the mappings to the set that we have\n # selected\n from pandas import Series\n\n mapper = Series(indexer)\n indexer = codes.take(ensure_platform_int(indexer))\n result = Series(Index(indexer).isin(r).nonzero()[0])\n m = result.map(mapper)\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Series\")\n m = np.asarray(m) # type: ignore[assignment]\n\n else:\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Series\")\n m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment]\n m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True\n\n return m\n\n if isinstance(key, slice):\n # handle a slice, returning a slice if we can\n # otherwise a boolean indexer\n\n try:\n if key.start is not None:\n start = level_index.get_loc(key.start)\n else:\n start = 0\n if key.stop is not None:\n stop = level_index.get_loc(key.stop)\n elif isinstance(start, slice):\n stop = len(level_index)\n else:\n stop = len(level_index) - 1\n step = key.step\n except KeyError:\n\n # we have a partial slice (like looking up a partial date\n # string)\n start = stop = level_index.slice_indexer(key.start, key.stop, key.step)\n step = start.step\n\n if isinstance(start, slice) or isinstance(stop, slice):\n # we have a slice for start and/or stop\n # a partial date 
slicer on a DatetimeIndex generates a slice\n # note that the stop ALREADY includes the stopped point (if\n # it was a string sliced)\n start = getattr(start, \"start\", start)\n stop = getattr(stop, \"stop\", stop)\n return convert_indexer(start, stop, step)\n\n elif level > 0 or self._lexsort_depth == 0 or step is not None:\n # need to have like semantics here to right\n # searching as when we are using a slice\n # so include the stop+1 (so we include stop)\n return convert_indexer(start, stop + 1, step)\n else:\n # sorted, so can return slice object -> view\n i = level_codes.searchsorted(start, side=\"left\")\n j = level_codes.searchsorted(stop, side=\"right\")\n return slice(i, j, step)\n\n else:\n\n idx = self._get_loc_single_level_index(level_index, key)\n\n if level > 0 or self._lexsort_depth == 0:\n # Desired level is not sorted\n if isinstance(idx, slice):\n # test_get_loc_partial_timestamp_multiindex\n locs = (level_codes >= idx.start) & (level_codes < idx.stop)\n return locs\n\n locs = np.array(level_codes == idx, dtype=bool, copy=False)\n\n if not locs.any():\n # The label is present in self.levels[level] but unused:\n raise KeyError(key)\n return locs\n\n if isinstance(idx, slice):\n # e.g. test_partial_string_timestamp_multiindex\n start = level_codes.searchsorted(idx.start, side=\"left\")\n # NB: \"left\" here bc of slice semantics\n end = level_codes.searchsorted(idx.stop, side=\"left\")\n else:\n start = level_codes.searchsorted(idx, side=\"left\")\n end = level_codes.searchsorted(idx, side=\"right\")\n\n if start == end:\n # The label is present in self.levels[level] but unused:\n raise KeyError(key)\n return slice(start, end)\n\n def get_locs(self, seq):\n \"\"\"\n Get location for a sequence of labels.\n\n Parameters\n ----------\n seq : label, slice, list, mask or a sequence of such\n You should use one of the above for each level.\n If a level should not be used, set it to ``slice(None)``.\n\n Returns\n -------\n numpy.ndarray\n NumPy array of integers suitable for passing to iloc.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_locs('b') # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP\n array([2], dtype=int64)\n \"\"\"\n\n # must be lexsorted to at least as many levels\n true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]\n if true_slices and true_slices[-1] >= self._lexsort_depth:\n raise UnsortedIndexError(\n \"MultiIndex slicing requires the index to be lexsorted: slicing \"\n f\"on levels {true_slices}, lexsort depth {self._lexsort_depth}\"\n )\n\n n = len(self)\n # indexer is the list of all positions that we want to take; we\n # start with it being everything and narrow it down as we look at each\n # entry in `seq`\n indexer = Index(np.arange(n))\n\n def _convert_to_indexer(r) -> Int64Index:\n # return an indexer\n if isinstance(r, slice):\n m = np.zeros(n, dtype=bool)\n m[r] = True\n r = m.nonzero()[0]\n elif com.is_bool_indexer(r):\n if len(r) != n:\n raise ValueError(\n \"cannot index with a boolean indexer \"\n \"that is not the same length as the \"\n \"index\"\n )\n r = r.nonzero()[0]\n return Int64Index(r)\n\n def _update_indexer(idxr: 
Index, indexer: Index) -> Index:\n indexer_intersection = indexer.intersection(idxr)\n if indexer_intersection.empty and not idxr.empty and not indexer.empty:\n raise KeyError(seq)\n return indexer_intersection\n\n for i, k in enumerate(seq):\n\n if com.is_bool_indexer(k):\n # a boolean indexer, must be the same length!\n k = np.asarray(k)\n lvl_indexer = _convert_to_indexer(k)\n indexer = _update_indexer(lvl_indexer, indexer=indexer)\n\n elif is_list_like(k):\n # a collection of labels to include from this level (these\n # are or'd)\n\n indexers: Int64Index | None = None\n for x in k:\n try:\n # Argument \"indexer\" to \"_get_level_indexer\" of \"MultiIndex\"\n # has incompatible type \"Index\"; expected \"Optional[Int64Index]\"\n item_lvl_indexer = self._get_level_indexer(\n x, level=i, indexer=indexer # type: ignore[arg-type]\n )\n except KeyError:\n # ignore not founds; see discussion in GH#39424\n warnings.warn(\n \"The behavior of indexing on a MultiIndex with a nested \"\n \"sequence of labels is deprecated and will change in a \"\n \"future version. `series.loc[label, sequence]` will \"\n \"raise if any members of 'sequence' or not present in \"\n \"the index's second level. To retain the old behavior, \"\n \"use `series.index.isin(sequence, level=1)`\",\n # TODO: how to opt in to the future behavior?\n # TODO: how to handle IntervalIndex level? (no test cases)\n FutureWarning,\n stacklevel=7,\n )\n continue\n else:\n idxrs = _convert_to_indexer(item_lvl_indexer)\n\n if indexers is None:\n indexers = idxrs\n else:\n indexers = indexers.union(idxrs, sort=False)\n\n if indexers is not None:\n indexer = _update_indexer(indexers, indexer=indexer)\n else:\n # no matches we are done\n # test_loc_getitem_duplicates_multiindex_empty_indexer\n return np.array([], dtype=np.intp)\n\n elif com.is_null_slice(k):\n # empty slice\n pass\n\n elif isinstance(k, slice):\n\n # a slice, include BOTH of the labels\n # Argument \"indexer\" to \"_get_level_indexer\" of \"MultiIndex\" has\n # incompatible type \"Index\"; expected \"Optional[Int64Index]\"\n lvl_indexer = self._get_level_indexer(\n k,\n level=i,\n indexer=indexer, # type: ignore[arg-type]\n )\n indexer = _update_indexer(\n _convert_to_indexer(lvl_indexer),\n indexer=indexer,\n )\n else:\n # a single label\n lvl_indexer = self._get_loc_level(k, level=i)[0]\n indexer = _update_indexer(\n _convert_to_indexer(lvl_indexer),\n indexer=indexer,\n )\n\n # empty indexer\n if indexer is None:\n return np.array([], dtype=np.intp)\n\n assert isinstance(indexer, Int64Index), type(indexer)\n indexer = self._reorder_indexer(seq, indexer)\n\n return indexer._values.astype(np.intp, copy=False)\n\n # --------------------------------------------------------------------\n\n def _reorder_indexer(\n self,\n seq: tuple[Scalar | Iterable | AnyArrayLike, ...],\n indexer: Int64Index,\n ) -> Int64Index:\n \"\"\"\n Reorder an indexer of a MultiIndex (self) so that the label are in the\n same order as given in seq\n\n Parameters\n ----------\n seq : label/slice/list/mask or a sequence of such\n indexer: an Int64Index indexer of self\n\n Returns\n -------\n indexer : a sorted Int64Index indexer of self ordered as seq\n \"\"\"\n # If the index is lexsorted and the list_like label in seq are sorted\n # then we do not need to sort\n if self._is_lexsorted():\n need_sort = False\n for i, k in enumerate(seq):\n if is_list_like(k):\n if not need_sort:\n k_codes = self.levels[i].get_indexer(k)\n k_codes = k_codes[k_codes >= 0] # Filter absent keys\n # True if the given codes 
are not ordered\n need_sort = (k_codes[:-1] > k_codes[1:]).any()\n elif isinstance(k, slice) and k.step is not None and k.step < 0:\n need_sort = True\n # Bail out if both index and seq are sorted\n if not need_sort:\n return indexer\n\n n = len(self)\n keys: tuple[np.ndarray, ...] = ()\n # For each level of the sequence in seq, map the level codes with the\n # order they appears in a list-like sequence\n # This mapping is then use to reorder the indexer\n for i, k in enumerate(seq):\n if is_scalar(k):\n # GH#34603 we want to treat a scalar the same as an all equal list\n k = [k]\n if com.is_bool_indexer(k):\n new_order = np.arange(n)[indexer]\n elif is_list_like(k):\n # Generate a map with all level codes as sorted initially\n k = algos.unique(k)\n key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(\n self.levels[i]\n )\n # Set order as given in the indexer list\n level_indexer = self.levels[i].get_indexer(k)\n level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys\n key_order_map[level_indexer] = np.arange(len(level_indexer))\n\n new_order = key_order_map[self.codes[i][indexer]]\n elif isinstance(k, slice) and k.step is not None and k.step < 0:\n new_order = np.arange(n)[k][indexer]\n elif isinstance(k, slice) and k.start is None and k.stop is None:\n # slice(None) should not determine order GH#31330\n new_order = np.ones((n,))[indexer]\n else:\n # For all other case, use the same order as the level\n new_order = np.arange(n)[indexer]\n keys = (new_order,) + keys\n\n # Find the reordering using lexsort on the keys mapping\n ind = np.lexsort(keys)\n return indexer[ind]\n\n def truncate(self, before=None, after=None) -> MultiIndex:\n \"\"\"\n Slice index between two labels / tuples, return new MultiIndex\n\n Parameters\n ----------\n before : label or tuple, can be partial. Default None\n None defaults to start\n after : label or tuple, can be partial. 
Default None\n None defaults to end\n\n Returns\n -------\n truncated : MultiIndex\n \"\"\"\n if after and before and after < before:\n raise ValueError(\"after < before\")\n\n i, j = self.levels[0].slice_locs(before, after)\n left, right = self.slice_locs(before, after)\n\n new_levels = list(self.levels)\n new_levels[0] = new_levels[0][i:j]\n\n new_codes = [level_codes[left:right] for level_codes in self.codes]\n new_codes[0] = new_codes[0] - i\n\n return MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=self._names,\n verify_integrity=False,\n )\n\n def equals(self, other: object) -> bool:\n \"\"\"\n Determines if two MultiIndex objects have the same labeling information\n (the levels themselves do not necessarily have to be the same)\n\n See Also\n --------\n equal_levels\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if len(self) != len(other):\n return False\n\n if not isinstance(other, MultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n if not self._should_compare(other):\n # object Index or Categorical[object] may contain tuples\n return False\n return array_equivalent(self._values, other._values)\n\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n self_codes = self.codes[i]\n other_codes = other.codes[i]\n self_mask = self_codes == -1\n other_mask = other_codes == -1\n if not np.array_equal(self_mask, other_mask):\n return False\n self_codes = self_codes[~self_mask]\n self_values = self.levels[i]._values.take(self_codes)\n\n other_codes = other_codes[~other_mask]\n other_values = other.levels[i]._values.take(other_codes)\n\n # since we use NaT both datetime64 and timedelta64 we can have a\n # situation where a level is typed say timedelta64 in self (IOW it\n # has other values than NaT) but types datetime64 in other (where\n # its all NaT) but these are equivalent\n if len(self_values) == 0 and len(other_values) == 0:\n continue\n\n if not array_equivalent(self_values, other_values):\n return False\n\n return True\n\n def equal_levels(self, other: MultiIndex) -> bool:\n \"\"\"\n Return True if the levels of both MultiIndex objects are the same\n\n \"\"\"\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True\n\n # --------------------------------------------------------------------\n # Set Methods\n\n def _union(self, other, sort) -> MultiIndex:\n other, result_names = self._convert_can_do_setop(other)\n if (\n any(-1 in code for code in self.codes)\n and any(-1 in code for code in self.codes)\n or self.has_duplicates\n or other.has_duplicates\n ):\n # This is only necessary if both sides have nans or one has dups,\n # fast_unique_multiple is faster\n result = super()._union(other, sort)\n else:\n rvals = other._values.astype(object, copy=False)\n result = lib.fast_unique_multiple([self._values, rvals], sort=sort)\n\n return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n return is_object_dtype(dtype)\n\n def _get_reconciled_name_object(self, other) -> MultiIndex:\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the names change, in which\n case make a shallow copy of self.\n \"\"\"\n names = self._maybe_match_names(other)\n if self.names != names:\n # Incompatible return value type (got \"Optional[MultiIndex]\", expected\n # \"MultiIndex\")\n return 
self.rename(names) # type: ignore[return-value]\n return self\n\n def _maybe_match_names(self, other):\n \"\"\"\n Try to find common names to attach to the result of an operation between\n a and b. Return a consensus list of names if they match at least partly\n or list of None if they have completely different names.\n \"\"\"\n if len(self.names) != len(other.names):\n return [None] * len(self.names)\n names = []\n for a_name, b_name in zip(self.names, other.names):\n if a_name == b_name:\n names.append(a_name)\n else:\n # TODO: what if they both have np.nan for their names?\n names.append(None)\n return names\n\n def _wrap_intersection_result(self, other, result):\n _, result_names = self._convert_can_do_setop(other)\n\n if len(result) == 0:\n return MultiIndex(\n levels=self.levels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)\n\n def _wrap_difference_result(self, other, result):\n _, result_names = self._convert_can_do_setop(other)\n\n if len(result) == 0:\n return MultiIndex(\n levels=[[]] * self.nlevels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_tuples(result, sortorder=0, names=result_names)\n\n def _convert_can_do_setop(self, other):\n result_names = self.names\n\n if not isinstance(other, Index):\n\n if len(other) == 0:\n return self[:0], self.names\n else:\n msg = \"other must be a MultiIndex or a list of tuples\"\n try:\n other = MultiIndex.from_tuples(other, names=self.names)\n except (ValueError, TypeError) as err:\n # ValueError raised by tuples_to_object_array if we\n # have non-object dtype\n raise TypeError(msg) from err\n else:\n result_names = get_unanimous_names(self, other)\n\n return other, result_names\n\n # --------------------------------------------------------------------\n\n @doc(Index.astype)\n def astype(self, dtype, copy: bool = True):\n dtype = pandas_dtype(dtype)\n if is_categorical_dtype(dtype):\n msg = \"> 1 ndim Categorical are not supported at this time\"\n raise NotImplementedError(msg)\n elif not is_object_dtype(dtype):\n raise TypeError(\n \"Setting a MultiIndex dtype to anything other than object \"\n \"is not supported\"\n )\n elif copy is True:\n return self._view()\n return self\n\n def _validate_fill_value(self, item):\n if not isinstance(item, tuple):\n # Pad the key with empty strings if lower levels of the key\n # aren't specified:\n item = (item,) + (\"\",) * (self.nlevels - 1)\n elif len(item) != self.nlevels:\n raise ValueError(\"Item must have length equal to number of levels.\")\n return item\n\n def insert(self, loc: int, item) -> MultiIndex:\n \"\"\"\n Make new MultiIndex inserting new item at location\n\n Parameters\n ----------\n loc : int\n item : tuple\n Must be same length as number of levels in the MultiIndex\n\n Returns\n -------\n new_index : Index\n \"\"\"\n item = self._validate_fill_value(item)\n\n new_levels = []\n new_codes = []\n for k, level, level_codes in zip(item, self.levels, self.codes):\n if k not in level:\n # have to insert into level\n # must insert at end otherwise you have to recompute all the\n # other codes\n lev_loc = len(level)\n level = level.insert(lev_loc, k)\n else:\n lev_loc = level.get_loc(k)\n\n new_levels.append(level)\n new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False\n )\n\n def 
delete(self, loc) -> MultiIndex:\n \"\"\"\n Make new index with passed location deleted\n\n Returns\n -------\n new_index : MultiIndex\n \"\"\"\n new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n verify_integrity=False,\n )\n\n @doc(Index.isin)\n def isin(self, values, level=None) -> np.ndarray:\n if level is None:\n values = MultiIndex.from_tuples(values, names=self.names)._values\n return algos.isin(self._values, values)\n else:\n num = self._get_level_number(level)\n levs = self.get_level_values(num)\n\n if levs.size == 0:\n return np.zeros(len(levs), dtype=np.bool_)\n return levs.isin(values)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"names\"])\n def set_names(self, names, level=None, inplace: bool = False) -> MultiIndex | None:\n return super().set_names(names=names, level=level, inplace=inplace)\n\n rename = set_names\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def drop_duplicates(self, keep: str | bool = \"first\") -> MultiIndex:\n return super().drop_duplicates(keep=keep)\n\n # ---------------------------------------------------------------\n # Arithmetic/Numeric Methods - Disabled\n\n __add__ = make_invalid_op(\"__add__\")\n __radd__ = make_invalid_op(\"__radd__\")\n __iadd__ = make_invalid_op(\"__iadd__\")\n __sub__ = make_invalid_op(\"__sub__\")\n __rsub__ = make_invalid_op(\"__rsub__\")\n __isub__ = make_invalid_op(\"__isub__\")\n __pow__ = make_invalid_op(\"__pow__\")\n __rpow__ = make_invalid_op(\"__rpow__\")\n __mul__ = make_invalid_op(\"__mul__\")\n __rmul__ = make_invalid_op(\"__rmul__\")\n __floordiv__ = make_invalid_op(\"__floordiv__\")\n __rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n __truediv__ = make_invalid_op(\"__truediv__\")\n __rtruediv__ = make_invalid_op(\"__rtruediv__\")\n __mod__ = make_invalid_op(\"__mod__\")\n __rmod__ = make_invalid_op(\"__rmod__\")\n __divmod__ = make_invalid_op(\"__divmod__\")\n __rdivmod__ = make_invalid_op(\"__rdivmod__\")\n # Unary methods disabled\n __neg__ = make_invalid_op(\"__neg__\")\n __pos__ = make_invalid_op(\"__pos__\")\n __abs__ = make_invalid_op(\"__abs__\")\n __inv__ = make_invalid_op(\"__inv__\")\n\n\ndef _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:\n \"\"\"Count depth (up to a maximum of `nlevels`) with which codes are lexsorted.\"\"\"\n int64_codes = [ensure_int64(level_codes) for level_codes in codes]\n for k in range(nlevels, 0, -1):\n if libalgos.is_lexsorted(int64_codes[:k]):\n return k\n return 0\n\n\ndef sparsify_labels(label_list, start: int = 0, sentinel=\"\"):\n pivoted = list(zip(*label_list))\n k = len(label_list)\n\n result = pivoted[: start + 1]\n prev = pivoted[start]\n\n for cur in pivoted[start + 1 :]:\n sparse_cur = []\n\n for i, (p, t) in enumerate(zip(prev, cur)):\n if i == k - 1:\n sparse_cur.append(t)\n result.append(sparse_cur)\n break\n\n if p == t:\n sparse_cur.append(sentinel)\n else:\n sparse_cur.extend(cur[i:])\n result.append(sparse_cur)\n break\n\n prev = cur\n\n return list(zip(*result))\n\n\ndef _get_na_rep(dtype) -> str:\n return {np.datetime64: \"NaT\", np.timedelta64: \"NaT\"}.get(dtype, \"NaN\")\n\n\ndef maybe_droplevels(index: Index, key) -> Index:\n \"\"\"\n Attempt to drop level or levels from the given index.\n\n Parameters\n ----------\n index: Index\n key : scalar or tuple\n\n Returns\n -------\n Index\n \"\"\"\n # drop levels\n original_index = index\n if isinstance(key, tuple):\n for _ 
in key:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n # we have dropped too much, so back out\n return original_index\n else:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n pass\n\n return index\n\n\ndef _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:\n \"\"\"\n Coerce the array-like indexer to the smallest integer dtype that can encode all\n of the given categories.\n\n Parameters\n ----------\n array_like : array-like\n categories : array-like\n copy : bool\n\n Returns\n -------\n np.ndarray\n Non-writeable.\n \"\"\"\n array_like = coerce_indexer_dtype(array_like, categories)\n if copy:\n array_like = array_like.copy()\n array_like.flags.writeable = False\n return array_like\n\n\ndef _require_listlike(level, arr, arrname: str):\n \"\"\"\n Ensure that level is either None or listlike, and arr is list-of-listlike.\n \"\"\"\n if level is not None and not is_list_like(level):\n if not is_list_like(arr):\n raise TypeError(f\"{arrname} must be list-like\")\n if is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list-like\")\n level = [level]\n arr = [arr]\n elif level is None or is_list_like(level):\n if not is_list_like(arr) or not is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list of lists-like\")\n return level, arr\n" ]
[ [ "pandas.Series", "numpy.asarray", "pandas._libs.lib.tuples_to_object_array", "numpy.cumsum", "numpy.dtype", "pandas.core.indexes.base.Index", "pandas.core.indexes.frozen.FrozenList", "numpy.concatenate", "pandas._config.get_option", "numpy.all", "numpy.any", "pandas.core.sorting.get_group_index", "pandas.util._decorators.deprecate_nonkeyword_arguments", "numpy.where", "pandas.core.indexes.numeric.Int64Index", "pandas.compat.numpy.function.validate_take", "pandas.core.indexes.base.get_unanimous_names", "pandas.core.arrays.categorical.factorize_from_iterables", "numpy.arange", "pandas.core.algorithms.unique", "pandas.core.common.asarray_tuplesafe", "pandas._libs.lib.is_integer", "numpy.lexsort", "numpy.bitwise_or.reduce", "pandas.core.algorithms.factorize", "pandas.core.common.cast_scalar_indexer", "pandas._libs.lib.fast_unique_multiple", "pandas._libs.lib.fast_zip", "pandas._libs.hashtable.duplicated", "pandas.core.dtypes.common.ensure_int64", "pandas.core.dtypes.common.is_iterator", "pandas._libs.algos.is_lexsorted", "pandas.core.dtypes.cast.coerce_indexer_dtype", "pandas.core.algorithms.take_nd", "pandas.core.sorting.lexsort_indexer", "pandas.core.dtypes.common.is_categorical_dtype", "numpy.zeros", "pandas.core.ops.invalid.make_invalid_op", "pandas.core.dtypes.common.is_list_like", "pandas.errors.InvalidIndexError", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.common.is_true_slices", "pandas.core.dtypes.common.is_hashable", "pandas.core.indexes.base.ensure_index", "pandas.core.arrays.Categorical.from_codes", "numpy.delete", "pandas.core.sorting.indexer_from_factorized", "pandas.core.dtypes.common.ensure_platform_int", "numpy.append", "numpy.equal", "numpy.array", "pandas.core.common.index_labels_to_array", "pandas.io.formats.format.get_adjustment", "pandas.errors.UnsortedIndexError", "pandas.core.algorithms.isin", "pandas.core.common.is_bool_indexer", "numpy.array_equal", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.missing.array_equivalent", "pandas._libs.lib.to_object_array_tuples", "pandas.core.dtypes.common.is_integer", "pandas.core.common.is_null_slice", "numpy.ones", "pandas.io.formats.printing.pprint_thing", "pandas.core.dtypes.common.is_object_dtype", "numpy.bincount", "pandas.core.dtypes.missing.isna", "pandas.core.reshape.util.cartesian_product", "pandas.util._decorators.doc", "pandas.compat.numpy.function.validate_repeat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sourabhyadav/test_track
[ "d2b4813aaf45dd35db5de3036eda114ef14d5022", "d2b4813aaf45dd35db5de3036eda114ef14d5022" ]
[ "my_pose_track_v1-1.py", "my_pose_track_v3.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf8 -*-\n\"\"\"\n Author: Haoming Chen\n E-mail: [email protected]\n Time: 2019/12/23\n Description: 使用gt信息\n\"\"\"\nimport time\nimport argparse\n\n# import vision essentials\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport logging\n# import Network\nfrom network_MSRA152 import Network\n# detector utils\nfrom detector.detector_yolov3 import * ##\n\n# pose estimation utils\nfrom HPE.dataset import Preprocessing\nfrom HPE.config import cfg\nfrom tfflat.base import Tester\nfrom tfflat.utils import mem_info\nfrom tfflat.logger import colorlogger\n# from nms.gpu_nms import gpu_nms\n# from nms.cpu_nms import cpu_nms\n\n# import GCN utils\nfrom graph import visualize_pose_matching\nfrom graph.visualize_pose_matching import *\n\n# import my own utils\nimport sys, os, time\n\nsys.path.append(os.path.abspath(\"./graph\"))\nsys.path.append(os.path.abspath(\"./utils\"))\nfrom utils_json import *\nfrom utils_io_file import *\nfrom utils_io_folder import *\nfrom visualizer import *\nfrom visualizer import visualizer\nfrom utils_choose import *\nimport logging\nfrom sheen import Str, ColoredHandler\nfrom my_toolbox.json_utils import *\nfrom my_toolbox.bipartite_graph import *\n\nfrom tqdm import tqdm\n\nflag_visualize = True\nflag_nms = False # Default is False, unless you know what you are doing\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n################\n##单纯为了Debug\nimage_crop_output_path = '/media/D/light-track/data/demo/crop'\nimage_seed_crop_output_path = '/media/D/light-track/data/demo/seed_crop'\ntracking_gt_info = []\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(ColoredHandler())\n\n\n################\n\ndef initialize_parameters():\n # global video_name, img_id\n\n global nms_method, nms_thresh, min_scores, min_box_size\n nms_method = 'nms'\n nms_thresh = 1.\n min_scores = 1e-10\n min_box_size = 0.\n\n global keyframe_interval, enlarge_scale, pose_matching_threshold\n keyframe_interval = 40 # choice examples: [2, 3, 5, 8, 10, 20, 40, 100, ....]\n\n enlarge_scale = 0.2 # how much to enlarge the bbox before pose estimation\n pose_matching_threshold = 0.5\n\n global flag_flip\n flag_flip = True\n\n global total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE\n global total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS\n total_time_POSE_ESTIMATOR = 0\n total_time_POSE_SIMILARITY = 0\n total_time_DET = 0\n total_time_ALL = 0\n total_time_ASSOCIATE = 0\n total_num_VIDEOS = 0\n total_num_FRAMES = 0\n total_num_PERSONS = 0\n\n \"\"\"test\"\"\"\n global filter_bbox_number, iou_alpha1, pose_alpha1\n filter_bbox_number = 0\n iou_alpha1 = 0.9\n pose_alpha1 = -1.1 # 求的是pose差异值,差异值越小表示越越相似。\n\n return\n\n\ndef light_track(pose_estimator,\n image_folder, output_json_path,\n visualize_folder, output_video_path, gt_info):\n global total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE\n global video_name, iou_alpha1, pose_alpha1\n global filter_bbox_number, total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS\n ''' 1. 
statistics: get total time for lighttrack processing'''\n st_time_total = time.time()\n\n bbox_dets_list_list = []\n keypoints_list_list = []\n\n num_imgs = len(gt_info)\n\n first_img_id = 0\n\n start_from_labeled = False\n if start_from_labeled:\n first_img_id = find_first_labeled_opensvai_json(gt_info)\n\n # last_gt_img_id = find_last_labeled_opensvai_json(gt_info)\n # num_imgs = last_gt_img_id + 1\n next_id = 0 # track_id 从0开始算\n img_id = first_img_id\n keypoints_number = 15\n total_num_FRAMES = num_imgs\n\n while img_id < num_imgs:\n\n img_gt_info = gt_info[img_id]\n image_name, labeled, candidates_info = read_image_data_opensvai_json(img_gt_info)\n img_path = os.path.join(image_folder, image_name)\n\n bbox_dets_list = [] # keyframe: start from empty\n keypoints_list = [] # keyframe: start from empty\n prev_frame_img_id = max(0, img_id - first_img_id - 1)\n if labeled and (img_id - first_img_id) % 5 == 0:\n logger.info(\"type:{},img_id:{}\".format('gt', img_id))\n # gt frame\n\n num_dets = len(candidates_info)\n\n if img_id == first_img_id:\n for det_id in range(num_dets):\n track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)\n # first帧直接使用\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"bbox\": bbox_det}\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"keypoints\": keypoints}\n bbox_dets_list.append(bbox_det_dict)\n keypoints_list.append(keypoints_dict)\n next_id = max(next_id, track_id)\n next_id += 1\n else: # Not First Frame\n bbox_list_prev_frame = bbox_dets_list_list[prev_frame_img_id].copy()\n keypoints_list_prev_frame = keypoints_list_list[prev_frame_img_id].copy()\n scores = np.zeros((num_dets, len(keypoints_list_prev_frame)))\n for det_id in range(num_dets):\n track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"bbox\": bbox_det}\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"keypoints\": keypoints}\n # 计算当前帧的bbox和先前帧bboxes的分数\n for prev_det_id in range(len(keypoints_list_prev_frame)):\n prev_bbox_det_dict = bbox_list_prev_frame[prev_det_id]\n prev_keypoints_dict = keypoints_list_prev_frame[prev_det_id]\n iou_score = iou(bbox_det, prev_bbox_det_dict['bbox'], xyxy=False)\n if iou_score > 0.5:\n st_time_pose = time.time()\n # gt的点标的并不全,没有标注的数据c为0\n prev_keypoints = prev_keypoints_dict[\"keypoints\"].copy()\n for index, value in enumerate(keypoints[2::3]):\n if value == 0:\n prev_keypoints[index * 3:(index + 1) * 3] = 0, 0, 0\n\n pose_match_score = get_pose_matching_score(keypoints, prev_keypoints,\n bbox_det_dict['bbox'],\n prev_bbox_det_dict['bbox'])\n end_time_pose = time.time()\n total_time_POSE_SIMILARITY += (end_time_pose - st_time_pose)\n scores[det_id, prev_det_id] = iou_alpha1 * iou_score + pose_alpha1 * pose_match_score\n\n bbox_dets_list.append(bbox_det_dict)\n keypoints_list.append(keypoints_dict)\n st_time_ass = time.time()\n bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,\n keypoints_list_prev_frame,\n scores, keypoints_list, next_id)\n end_time_ass = time.time()\n total_time_ASSOCIATE += (end_time_ass - st_time_ass)\n\n next_id = now_next_id\n\n # 这一帧没有一个保留下来的bbox\n if len(bbox_dets_list) == 0:\n bbox_det_dict = {\"img_id\": img_id,\n 
\"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"bbox\": [0, 0, 2, 2]}\n bbox_dets_list.append(bbox_det_dict)\n\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"keypoints\": []}\n keypoints_list.append(keypoints_dict)\n\n bbox_dets_list_list.append(bbox_dets_list)\n keypoints_list_list.append(keypoints_list)\n\n else:\n logger.info(\"type:{},img_id:{}\".format('normal', img_id))\n ''' NOT GT Frame '''\n candidates_total = []\n st_time_DET = time.time()\n candidates_from_detector = inference_yolov3(img_path)\n end_time_DET = time.time()\n total_time_DET += (end_time_DET - st_time_DET)\n\n candidates_from_prev = []\n\n bbox_list_prev_frame = []\n ''' 根据先前帧的信息补充框 '''\n if img_id > first_img_id:\n bbox_list_prev_frame = bbox_dets_list_list[prev_frame_img_id].copy()\n keypoints_list_prev_frame = keypoints_list_list[prev_frame_img_id].copy()\n num_prev_bbox = len(bbox_list_prev_frame)\n for prev_det_id in range(num_prev_bbox):\n # obtain bbox position and track id\n keypoints = keypoints_list_prev_frame[prev_det_id]['keypoints']\n bbox_det_next = get_bbox_from_keypoints(keypoints)\n if bbox_invalid(bbox_det_next):\n continue\n # xywh\n candidates_from_prev.append(bbox_det_next)\n\n ''' 拿到本帧全部的候选框 '''\n candidates_total = candidates_from_detector + candidates_from_prev\n num_candidate = len(candidates_total)\n ''' 使用关节点的置信度来作为bbox的置信度 '''\n candidates_dets = []\n for candidate_id in range(num_candidate):\n bbox_det = candidates_total[candidate_id]\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": candidate_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"bbox\": bbox_det}\n st_time_pose = time.time()\n keypoints = inference_keypoints(pose_estimator, bbox_det_dict)[0]['keypoints']\n end_time_pose = time.time()\n total_time_POSE_ESTIMATOR += (end_time_pose - st_time_pose)\n bbox_det_next = xywh_to_x1y1x2y2(bbox_det)\n score = sum(keypoints[2::3]) / keypoints_number\n # 不知道为什么他这个pose的置信度会高于1\n if bbox_invalid(bbox_det_next) or score < 0.7:\n filter_bbox_number += 1\n continue\n candidate_det = bbox_det_next + [score]\n candidates_dets.append(candidate_det)\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": candidate_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"keypoints\": keypoints}\n\n bbox_dets_list.append(bbox_det_dict)\n keypoints_list.append(keypoints_dict)\n # 根据bbox的置信度来使用nms\n keep = py_cpu_nms(np.array(candidates_dets, dtype=np.float32), 0.5) if len(candidates_dets) > 0 else []\n\n candidates_total = np.array(candidates_total)[keep]\n t = bbox_dets_list.copy()\n k = keypoints_list.copy()\n # 筛选过后的\n bbox_dets_list = [t[i] for i in keep]\n keypoints_list = [k[i] for i in keep]\n \"\"\" Data association \"\"\"\n cur_det_number = len(candidates_total)\n prev_det_number = len(bbox_list_prev_frame)\n if img_id == first_img_id or prev_det_number == 0:\n for det_id, bbox_det_dict in enumerate(bbox_dets_list):\n keypoints_dict = keypoints_list[det_id]\n bbox_det_dict['det_id'] = det_id\n keypoints_dict['det_id'] = det_id\n track_id = next_id\n bbox_det_dict['track_id'] = track_id\n keypoints_dict['track_id'] = track_id\n next_id = max(next_id, track_id)\n next_id += 1\n else:\n scores = np.zeros((cur_det_number, prev_det_number))\n for det_id in range(cur_det_number):\n bbox_det_dict = bbox_dets_list[det_id]\n keypoints_dict = keypoints_list[det_id]\n bbox_det = bbox_det_dict['bbox']\n keypoints = keypoints_dict['keypoints']\n\n # 计算当前帧的bbox和先前帧bboxes的分数\n for prev_det_id in range(prev_det_number):\n 
prev_bbox_det_dict = bbox_list_prev_frame[prev_det_id]\n prev_keypoints_dict = keypoints_list_prev_frame[prev_det_id]\n iou_score = iou(bbox_det, prev_bbox_det_dict['bbox'], xyxy=False)\n if iou_score > 0.5:\n st_time_pose = time.time()\n pose_match_score = get_pose_matching_score(keypoints, prev_keypoints_dict[\"keypoints\"],\n bbox_det_dict[\"bbox\"],\n prev_bbox_det_dict[\"bbox\"])\n end_time_pose = time.time()\n total_time_POSE_SIMILARITY += (end_time_pose - st_time_pose)\n scores[det_id, prev_det_id] = iou_alpha1 * iou_score + pose_alpha1 * pose_match_score\n\n st_time_ass = time.time()\n bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,\n bbox_list_prev_frame, scores,\n keypoints_list, next_id)\n end_time_ass = time.time()\n total_time_ASSOCIATE += (end_time_ass - st_time_ass)\n\n next_id = now_next_id\n\n if len(bbox_dets_list) == 0:\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"bbox\": [0, 0, 2, 2]}\n bbox_dets_list.append(bbox_det_dict)\n\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"keypoints\": []}\n keypoints_list.append(keypoints_dict)\n\n bbox_dets_list_list.append(bbox_dets_list)\n keypoints_list_list.append(keypoints_list)\n\n img_id += 1\n\n ''' 1. statistics: get total time for lighttrack processing'''\n end_time_total = time.time()\n total_time_ALL += (end_time_total - st_time_total)\n\n # convert results into openSVAI format\n print(\"Exporting Results in openSVAI Standard Json Format...\")\n poses_standard = pose_to_standard_mot(keypoints_list_list, bbox_dets_list_list)\n # json_str = python_to_json(poses_standard)\n # print(json_str)\n\n # output json file\n pose_json_folder, _ = get_parent_folder_from_path(output_json_path)\n create_folder(pose_json_folder)\n write_json_to_file(poses_standard, output_json_path)\n print(\"Json Export Finished!\")\n\n # visualization\n if flag_visualize is True:\n print(\"Visualizing Pose Tracking Results...\")\n create_folder(visualize_folder)\n visualizer.show_all_from_standard_json(output_json_path, classes, joint_pairs, joint_names,\n image_folder,\n visualize_folder,\n flag_track=True)\n print(\"Visualization Finished!\")\n\n img_paths = get_immediate_childfile_paths(visualize_folder)\n avg_fps = total_num_FRAMES / total_time_ALL\n # make_video_from_images(img_paths, output_video_path, fps=avg_fps, size=None, is_color=True, format=\"XVID\")\n\n fps = 5 # 25 原来\n visualizer.make_video_from_images(img_paths, output_video_path, fps=fps, size=None, is_color=True,\n format=\"XVID\")\n\n\ndef bipartite_graph_matching(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames,\n current_keypoints_dict_list, next_id):\n prev_to_cur_match = Kuhn_Munkras_match(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames)\n result_bbox_dict_list = []\n result_keypoints_dict_list = []\n det_number = 0\n assigned_cur_bbox = []\n for prev_index, cur_index in enumerate(prev_to_cur_match):\n if not np.isnan(cur_index):\n assigned_cur_bbox.append(cur_index)\n cur_index = int(cur_index)\n cur_bbox_dict = current_bbox_dict_list[cur_index]\n cur_keypoints_dict = current_keypoints_dict_list[cur_index]\n cur_bbox_dict['det_id'] = det_number\n cur_bbox_dict['track_id'] = prev_index\n cur_keypoints_dict['det_id'] = det_number\n cur_keypoints_dict['track_id'] = prev_index\n result_bbox_dict_list.append(cur_bbox_dict)\n result_keypoints_dict_list.append(cur_keypoints_dict)\n 
det_number += 1\n\n # 没有分配track_id的bbox,给其新的track_id\n for cur_index in range(len(current_bbox_dict_list)):\n if cur_index not in assigned_cur_bbox:\n cur_bbox_dict = current_bbox_dict_list[cur_index]\n cur_keypoints_dict = current_keypoints_dict_list[cur_index]\n cur_bbox_dict['det_id'] = det_number\n cur_bbox_dict['track_id'] = next_id\n cur_keypoints_dict['det_id'] = det_number\n cur_keypoints_dict['track_id'] = next_id\n result_bbox_dict_list.append(cur_bbox_dict)\n result_keypoints_dict_list.append(cur_keypoints_dict)\n det_number += 1\n next_id += 1\n\n return result_bbox_dict_list, result_keypoints_dict_list, next_id\n\n\ndef distance_between_two_boxs(boxA, boxB):\n x1, y1, _, _ = boxA\n x2, y2, _, _ = boxB\n distance = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))\n return distance\n\n\ndef get_track_id_SGCN(bbox_cur_frame, bbox_list_prev_frame, keypoints_cur_frame,\n keypoints_list_prev_frame):\n assert (len(bbox_list_prev_frame) == len(keypoints_list_prev_frame))\n\n min_index = None\n min_matching_score = sys.maxsize\n global pose_matching_threshold\n # if track_id is still not assigned, the person is really missing or track is really lost\n track_id = -1\n\n for det_index, bbox_det_dict in enumerate(bbox_list_prev_frame):\n bbox_prev_frame = bbox_det_dict[\"bbox\"]\n\n # check the pose matching score\n keypoints_dict = keypoints_list_prev_frame[det_index]\n keypoints_prev_frame = keypoints_dict[\"keypoints\"]\n pose_matching_score = get_pose_matching_score(keypoints_cur_frame, keypoints_prev_frame,\n bbox_cur_frame,\n bbox_prev_frame)\n\n if pose_matching_score <= pose_matching_threshold and pose_matching_score <= min_matching_score:\n # match the target based on the pose matching score\n min_matching_score = pose_matching_score\n min_index = det_index\n\n if min_index is None:\n return -1, None\n else:\n track_id = bbox_list_prev_frame[min_index][\"track_id\"]\n return track_id, min_index\n\n\ndef get_track_id_SpatialConsistency(bbox_cur_frame, bbox_list_prev_frame):\n \"\"\" 用当前帧的bbox,去找之前帧中的bboxes的IOU值最大bbox。\n\n 使用一个bbox去前一帧找IOU值最大的。\n\n \"\"\"\n\n thresh = 0.3\n max_iou_score = 0\n max_index = -1\n\n for bbox_index, bbox_det_dict in enumerate(bbox_list_prev_frame):\n bbox_prev_frame = bbox_det_dict[\"bbox\"]\n\n boxA = xywh_to_x1y1x2y2(bbox_cur_frame)\n boxB = xywh_to_x1y1x2y2(bbox_prev_frame)\n iou_score = iou(boxA, boxB)\n if iou_score > max_iou_score:\n max_iou_score = iou_score\n max_index = bbox_index\n\n if max_iou_score > thresh:\n track_id = bbox_list_prev_frame[max_index][\"track_id\"]\n return track_id, max_index\n else:\n return -1, None\n\n\ndef get_pose_matching_score(keypoints_A, keypoints_B, bbox_A, bbox_B):\n if keypoints_A == [] or keypoints_B == []:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n if bbox_invalid(bbox_A) or bbox_invalid(bbox_B):\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n graph_A, flag_pass_check = keypoints_to_graph(keypoints_A, bbox_A)\n if flag_pass_check is False:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n graph_B, flag_pass_check = keypoints_to_graph(keypoints_B, bbox_B)\n if flag_pass_check is False:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n sample_graph_pair = (graph_A, graph_B)\n data_A, data_B = graph_pair_to_data(sample_graph_pair)\n\n start = time.time()\n flag_match, dist = pose_matching(data_A, data_B)\n end = time.time()\n return dist\n\n\ndef is_target_lost(keypoints, method=\"max_average\"):\n num_keypoints = 
int(len(keypoints) / 3.0)\n if method == \"average\":\n # pure average\n score = 0\n for i in range(num_keypoints):\n score += keypoints[3 * i + 2]\n score /= num_keypoints * 1.0\n print(\"target_score: {}\".format(score))\n elif method == \"max_average\":\n score_list = keypoints[2::3]\n score_list_sorted = sorted(score_list)\n top_N = 4\n assert (top_N < num_keypoints)\n top_scores = [score_list_sorted[-i] for i in range(1, top_N + 1)]\n score = sum(top_scores) / top_N\n if score < 0.6:\n return True\n else:\n return False\n\n\ndef py_cpu_nms(dets, thresh):\n \"\"\"Pure Python NMS baseline.\"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4] # bbox打分\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 打分从大到小排列,取index\n order = scores.argsort()[::-1]\n # keep为最后保留的边框\n keep = []\n while order.size > 0:\n # order[0]是当前分数最大的窗口,肯定保留\n i = order[0]\n keep.append(i)\n # 计算窗口i与其他所有窗口的交叠部分的面积\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # 交/并得到iou值\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # inds为所有与窗口i的iou值小于threshold值的窗口的index,其他窗口此次都被窗口i吸收\n inds = np.where(ovr <= thresh)[0]\n # order里面只保留与窗口i交叠面积小于threshold的那些窗口,由于ovr长度比order长度少1(不包含i),所以inds+1对应到保留的窗口\n order = order[inds + 1]\n\n return keep\n\n\ndef iou(boxA, boxB, xyxy=True):\n # box: (x1, y1, x2, y2)\n # determine the (x, y)-coordinates of the intersection rectangle\n if not xyxy:\n # 如果是xy wh那么要转换数据 - xy是最小坐标\n b1_x1, b1_x2 = boxA[0], boxA[0] + boxA[2]\n b1_y1, b1_y2 = boxA[1], boxA[1] + boxA[3]\n b2_x1, b2_x2 = boxB[0], boxB[0] + boxB[2]\n b2_y1, b2_y2 = boxB[1], boxB[1] + boxB[3]\n xA = max(b1_x1, b2_x1)\n yA = max(b1_y1, b2_y1)\n xB = min(b1_x2, b2_x2)\n yB = min(b1_y2, b2_y2)\n else:\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n # compute the area of both the prediction and ground-truth\n # rectangles\n if not xyxy:\n boxAArea = (boxA[2] + 1) * (boxA[3] + 1)\n boxBArea = (boxB[2] + 1) * (boxB[3] + 1)\n else:\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1) # w×h\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1) # w×h\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou\n\n\ndef get_bbox_from_keypoints(keypoints_python_data):\n if keypoints_python_data == [] or keypoints_python_data == 45 * [0]:\n return [0, 0, 2, 2]\n\n num_keypoints = len(keypoints_python_data)\n x_list = []\n y_list = []\n for keypoint_id in range(int(num_keypoints / 3)):\n x = keypoints_python_data[3 * keypoint_id]\n y = keypoints_python_data[3 * keypoint_id + 1]\n vis = keypoints_python_data[3 * keypoint_id + 2] # 是否可见\n if vis != 0 and vis != 3:\n x_list.append(x)\n y_list.append(y)\n min_x = min(x_list)\n min_y = min(y_list)\n max_x = max(x_list)\n max_y = max(y_list)\n\n if not x_list or not y_list:\n return [0, 0, 2, 2]\n\n scale = enlarge_scale # enlarge bbox by 20% with same center position\n bbox = enlarge_bbox([min_x, min_y, max_x, max_y], scale)\n 
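# note: the 'if not x_list or not y_list' guard above can never trigger usefully, because min()/max() on an empty list would already have raised a ValueError; to be effective the guard would have to run before those calls\n 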
bbox_in_xywh = x1y1x2y2_to_xywh(bbox)\n return bbox_in_xywh\n\n\ndef enlarge_bbox(bbox, scale):\n assert (scale > 0)\n min_x, min_y, max_x, max_y = bbox\n margin_x = int(0.5 * scale * (max_x - min_x))\n margin_y = int(0.5 * scale * (max_y - min_y))\n if margin_x < 0: margin_x = 2\n if margin_y < 0: margin_y = 2\n\n min_x -= margin_x\n max_x += margin_x\n min_y -= margin_y\n max_y += margin_y\n\n width = max_x - min_x\n height = max_y - min_y\n if max_y < 0 or max_x < 0 or width <= 0 or height <= 0 or width > 2000 or height > 2000:\n min_x = 0\n max_x = 2\n min_y = 0\n max_y = 2\n\n bbox_enlarged = [min_x, min_y, max_x, max_y]\n return bbox_enlarged\n\n\ndef inference_keypoints(pose_estimator, test_data):\n cls_dets = test_data[\"bbox\"]\n # nms on the bboxes\n if flag_nms is True:\n cls_dets, keep = apply_nms(cls_dets, nms_method, nms_thresh)\n test_data = np.asarray(test_data)[keep]\n if len(keep) == 0:\n return -1\n else:\n test_data = [test_data]\n\n # crop and detect pose\n pose_heatmaps, details, cls_skeleton, crops, start_id, end_id = get_pose_from_bbox(pose_estimator,\n test_data,\n cfg)\n # get keypoint positions from pose\n keypoints = get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id)\n # dump results\n results = prepare_results(test_data[0], keypoints, cls_dets)\n return results\n\n\ndef apply_nms(cls_dets, nms_method, nms_thresh):\n # nms and filter\n keep = np.where((cls_dets[:, 4] >= min_scores) &\n ((cls_dets[:, 3] - cls_dets[:, 1]) * (\n cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0]\n cls_dets = cls_dets[keep]\n if len(cls_dets) > 0:\n if nms_method == 'nms':\n keep = gpu_nms(cls_dets, nms_thresh)\n elif nms_method == 'soft':\n keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2)\n else:\n assert False\n cls_dets = cls_dets[keep]\n return cls_dets, keep\n\n\ndef get_pose_from_bbox(pose_estimator, test_data, cfg):\n cls_skeleton = np.zeros(\n (len(test_data), cfg.nr_skeleton, 3)) # cfg.nr_skeleton=joint number. 
size=number*3\n crops = np.zeros((len(test_data), 4))\n\n batch_size = 1\n start_id = 0\n end_id = min(len(test_data), batch_size)\n\n test_imgs = []\n details = []\n for i in range(start_id, end_id):\n test_img, detail = Preprocessing(test_data[i], stage='test')\n test_imgs.append(test_img)\n details.append(detail)\n\n details = np.asarray(details)\n feed = test_imgs\n for i in range(end_id - start_id):\n ori_img = test_imgs[i][0].transpose(1, 2, 0)\n if flag_flip == True:\n flip_img = cv2.flip(ori_img, 1)\n feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...])\n feed = np.vstack(feed)\n\n res = pose_estimator.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])[0]\n res = res.transpose(0, 3, 1, 2)\n\n if flag_flip == True:\n for i in range(end_id - start_id):\n fmp = res[end_id - start_id + i].transpose((1, 2, 0))\n fmp = cv2.flip(fmp, 1)\n fmp = list(fmp.transpose((2, 0, 1)))\n for (q, w) in cfg.symmetry:\n fmp[q], fmp[w] = fmp[w], fmp[q]\n fmp = np.array(fmp)\n res[i] += fmp\n res[i] /= 2\n\n pose_heatmaps = res\n return pose_heatmaps, details, cls_skeleton, crops, start_id, end_id\n\n\ndef get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id):\n res = pose_heatmaps\n for test_image_id in range(start_id, end_id):\n r0 = res[test_image_id - start_id].copy()\n r0 /= 255.\n r0 += 0.5\n\n for w in range(cfg.nr_skeleton):\n res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w])\n\n border = 10\n dr = np.zeros(\n (cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))\n dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy()\n\n for w in range(cfg.nr_skeleton):\n dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)\n\n for w in range(cfg.nr_skeleton):\n lb = dr[w].argmax()\n y, x = np.unravel_index(lb, dr[w].shape)\n dr[w, y, x] = 0\n lb = dr[w].argmax()\n py, px = np.unravel_index(lb, dr[w].shape)\n y -= border\n x -= border\n py -= border + y\n px -= border + x\n ln = (px ** 2 + py ** 2) ** 0.5\n delta = 0.25\n if ln > 1e-3:\n x += delta * px / ln\n y += delta * py / ln\n x = max(0, min(x, cfg.output_shape[1] - 1))\n y = max(0, min(y, cfg.output_shape[0] - 1))\n cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2)\n cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]\n\n # map back to original images\n crops[test_image_id, :] = details[test_image_id - start_id, :]\n for w in range(cfg.nr_skeleton):\n cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[\n 1] * (crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0]\n cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[\n 0] * (crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1]\n return cls_skeleton\n\n\ndef prepare_results(test_data, cls_skeleton, cls_dets):\n cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton)\n\n cls_scores = 1\n dump_results = []\n cls_skeleton = np.concatenate(\n [cls_skeleton.reshape(-1, cfg.nr_skeleton * 3),\n (cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]],\n axis=1)\n for i in range(len(cls_skeleton)):\n result = dict(image_id=test_data['img_id'],\n category_id=1,\n score=float(round(cls_skeleton[i][-1], 4)),\n keypoints=cls_skeleton[i][:-1].round(3).tolist())\n dump_results.append(result)\n return dump_results\n\n\ndef pose_to_standard_mot(keypoints_list_list, dets_list_list):\n 
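# build openSVAI-standard MOT output: one record per frame holding the image folder/name/id plus a candidate list with det_bbox (xywh), det_score, track_id, track_score and pose_keypoints_2d\n 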
openSVAI_python_data_list = []\n\n num_keypoints_list = len(keypoints_list_list)\n num_dets_list = len(dets_list_list)\n assert (num_keypoints_list == num_dets_list)\n\n for i in range(num_dets_list):\n\n dets_list = dets_list_list[i]\n keypoints_list = keypoints_list_list[i]\n\n if dets_list == []:\n continue\n img_path = dets_list[0][\"imgpath\"]\n img_folder_path = os.path.dirname(img_path)\n img_name = os.path.basename(img_path)\n img_info = {\"folder\": img_folder_path,\n \"name\": img_name,\n \"id\": [int(i)]}\n openSVAI_python_data = {\"image\": [], \"candidates\": []}\n openSVAI_python_data[\"image\"] = img_info\n\n num_dets = len(dets_list)\n num_keypoints = len(\n keypoints_list) # number of persons, not number of keypoints for each person\n candidate_list = []\n\n for j in range(num_dets):\n keypoints_dict = keypoints_list[j]\n dets_dict = dets_list[j]\n img_id = keypoints_dict[\"img_id\"]\n det_id = keypoints_dict[\"det_id\"]\n track_id = keypoints_dict[\"track_id\"]\n img_path = keypoints_dict[\"imgpath\"]\n\n bbox_dets_data = dets_list[det_id]\n det = dets_dict[\"bbox\"]\n if det == [0, 0, 2, 2]:\n # do not provide keypoints\n candidate = {\"det_bbox\": [0, 0, 2, 2],\n \"det_score\": 0}\n else:\n bbox_in_xywh = det[0:4]\n keypoints = keypoints_dict[\"keypoints\"]\n\n track_score = sum(keypoints[2::3]) / len(keypoints) / 3.0\n\n candidate = {\"det_bbox\": bbox_in_xywh,\n \"det_score\": 1,\n \"track_id\": track_id,\n \"track_score\": track_score,\n \"pose_keypoints_2d\": keypoints}\n candidate_list.append(candidate)\n openSVAI_python_data[\"candidates\"] = candidate_list\n openSVAI_python_data_list.append(openSVAI_python_data)\n return openSVAI_python_data_list\n\n\ndef x1y1x2y2_to_xywh(det):\n x1, y1, x2, y2 = det\n w, h = int(x2) - int(x1), int(y2) - int(y1)\n return [x1, y1, w, h]\n\n\ndef xywh_to_x1y1x2y2(det):\n x1, y1, w, h = det\n x2, y2 = x1 + w, y1 + h\n return [x1, y1, x2, y2]\n\n\ndef bbox_invalid(bbox):\n if bbox == [0, 0, 2, 2]:\n return True\n if bbox[2] <= 0 or bbox[3] <= 0 or bbox[2] > 2000 or bbox[3] > 2000:\n return True\n return False\n\n\ndef non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n \"\"\"\n Removes detections with lower object confidence score than 'conf_thres' and performs\n Non-Maximum Suppression to further filter detections.\n Returns detections with shape:\n (x1, y1, x2, y2, object_conf, class_score, class_pred)\n \"\"\"\n # prediction [image_number,]\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # 前四位\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0] # 1是维度\n # Sort by it\n image_pred = image_pred[(-score).argsort()] # 将image_pred根据score排序,score越大的预测,排在越前面。\n class_preds = image_pred[:, 5:].max(1, keepdim=True)[1].float() # keepdim=True shape : [...,1]\n detections = torch.cat((image_pred[:, :5], class_preds), 1) # 按列拼,直接拼成它的第5个值。\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n # 所有的候选跟置信度最大的比较(也会和它自己比较)\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with 
lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n return output\n\n\nif __name__ == '__main__':\n\n global args\n parser = argparse.ArgumentParser()\n # parser.add_argument('--video_path', '-v', type=str, dest='video_path',\n # # default=\"data/demo/video.mp4\")\n # default=\"data/demo/0003.m4\")\n # parser.add_argument('--images_path', '-i', type=str, dest='images_path',\n # default=\"data/demo/mpii-video-pose/0001\")\n # parser.add_argument('--model', '-m', type=str, dest='test_model',\n # default=\"weights/mobile-deconv/snapshot_296.ckpt\")\n # parser.add_argument('--model', '-m', type=str, dest='test_model',\n # default=\"weights/CPN101/CPN_snapshot_293.ckpt\")\n parser.add_argument('--model', '-m', type=str, dest='test_model',\n default=\"weights/MSRA152/MSRA_snapshot_285.ckpt\")\n # default=\"weights/mobile-deconv/snapshot_296.ckpt\")\n parser.add_argument('--train', type=bool, dest='train',\n default=True)\n # parser.add_argument('--exp_number', type=str, dest='exp_number', default='2017-val',\n # help='number of experiment')\n parser.add_argument('--exp_number', type=str, dest='exp_number', default='test_one_video',\n help='number of experiment')\n args = parser.parse_args()\n args.bbox_thresh = 0.4\n\n # initialize pose estimator\n initialize_parameters()\n pose_estimator = Tester(Network(), cfg)\n pose_estimator.load_weights(args.test_model)\n\n train = args.train\n exp_number = args.exp_number\n\n ##################################\n test_one_video = False\n # exp_number = \"test_one_video_MSRA152\"\n val = True\n exp_number = \"2017-val-iou-{}-pose{}-together-MSRA152\".format(iou_alpha1, pose_alpha1)\n test = False\n # exp_number = \"2017-test-iou-pose-together\"\n experiment_output_root = '/media/F'\n visualize_root_folder = \"{}/exp_{}/visualize\".format(experiment_output_root, exp_number)\n output_video_folder = \"{}/exp_{}/videos\".format(experiment_output_root, exp_number)\n output_json_folder = \"{}/exp_{}/jsons\".format(experiment_output_root, exp_number)\n evaluation_folder = \"{}/exp_{}/evaluation\".format(experiment_output_root, exp_number)\n logger_file_foler = \"{}/exp_{}/log\".format(experiment_output_root, exp_number)\n\n create_folder(output_video_folder)\n create_folder(output_json_folder)\n create_folder(logger_file_foler)\n\n create_folder(evaluation_folder)\n create_folder(os.path.join(evaluation_folder, \"annotations\", \"val\"))\n create_folder(os.path.join(evaluation_folder, \"out\"))\n create_folder(os.path.join(evaluation_folder, \"posetrack_results\"))\n ## save log file\n logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n level=logging.DEBUG,\n filename=os.path.join(logger_file_foler, 'experiment.log'),\n filemode='a')\n ####################################\n\n logger.info(\" test_one_video:{} val:{} test:{} \".format(test_one_video, val, test))\n\n \"\"\" 每个JSON文件为一个视频,读取一个个的JSON文件,产生结果 \"\"\"\n if test_one_video:\n numbers = ['24642', '24635', '23699', '23695', '23484', '23471', '23416', '22682', '22671', '22642', '22124',\n '00043', '00096']\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n numbers = ['23699']\n 
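# the longer 'numbers' list above is immediately overridden so that only sequence 23699 is processed in this single-video run\n 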
input_jsons = [\"/media/D/DataSet/PoseTrack2017/train/{}_mpii_relpath_5sec_trainsub_OpenSVAI.json\".format(number)\n for\n number in numbers]\n frame_number = 0\n videos_number = len(input_jsons)\n for input_json in tqdm(input_jsons):\n videos_json_data, videos_number = read_opensvai_json(input_json)\n for video_seq_id in range(videos_number):\n video_json_data = videos_json_data[video_seq_id]\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n\n \"\"\" The PoseTrack2017 validation set \"\"\"\n if val:\n input_jsons_folder = \"/media/D/DataSet/PoseTrack2017/val/\"\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n val_jsons = os.listdir(input_jsons_folder)\n frame_number = 0\n videos_number = len(val_jsons)\n for json in val_jsons:\n videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))\n assert len(videos_json_data) == 1\n video_json_data = videos_json_data[0]\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n \"\"\" The PoseTrack2017 test set \"\"\"\n if test:\n input_jsons_folder = \"/media/D/DataSet/PoseTrack2017/test/\"\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n val_jsons = os.listdir(input_jsons_folder)\n frame_number = 0\n videos_number = len(val_jsons)\n for json in val_jsons:\n videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))\n assert len(videos_json_data) == 1\n video_json_data = videos_json_data['annolist']\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n\n ''' Display statistics '''\n logger.info(\"total_time_ALL: 
{:.2f}s\".format(total_time_ALL))\n logger.info(\"total_time_DET: {:.2f}s\".format(total_time_DET))\n logger.info(\"total_time_POSE_ESTIMATOR: {:.2f}s\".format(total_time_POSE_ESTIMATOR))\n logger.info(\"total_time_POSE_SIMILARITY: {:.2f}s\".format(total_time_POSE_SIMILARITY))\n logger.info(\"total_time_ASSOCIATE: {:.2f}s\".format(total_time_ASSOCIATE))\n logger.info(\"total_time_LIGHTTRACK: {:.2f}s\".format(\n total_time_ALL - total_time_DET - total_time_POSE_ESTIMATOR - total_time_POSE_SIMILARITY - total_time_ASSOCIATE))\n logger.info(\"filter_bbox_number:{}\".format(filter_bbox_number))\n", "#!/usr/bin/python\n# -*- coding:utf8 -*-\n\"\"\"\n Author: Haoming Chen\n E-mail: [email protected]\n Time: 2020/01/13\n Description: 利用未来帧gt的信息,从未来回到过去进行矫正。\n\"\"\"\nimport time\nimport argparse\n\n# import vision essentials\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport logging\n# import Network\nfrom network_MSRA152 import Network\n# detector utils\nfrom detector.detector_yolov3 import * ##\n\n# pose estimation utils\nfrom HPE.dataset import Preprocessing\nfrom HPE.config import cfg\nfrom tfflat.base import Tester\nfrom tfflat.utils import mem_info\nfrom tfflat.logger import colorlogger\n# from nms.gpu_nms import gpu_nms\n# from nms.cpu_nms import cpu_nms\n\n# import GCN utils\nfrom graph import visualize_pose_matching\nfrom graph.visualize_pose_matching import *\n\n# import my own utils\nimport sys, os, time\n\nsys.path.append(os.path.abspath(\"./graph\"))\nsys.path.append(os.path.abspath(\"./utils\"))\nfrom utils_json import *\nfrom utils_io_file import *\nfrom utils_io_folder import *\nfrom visualizer import *\nfrom visualizer import visualizer\nfrom utils_choose import *\nimport logging\nfrom sheen import Str, ColoredHandler\nfrom my_toolbox.json_utils import *\nfrom my_toolbox.bipartite_graph import *\n\nfrom tqdm import tqdm\n\nflag_visualize = True\nflag_nms = False # Default is False, unless you know what you are doing\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n################\n##单纯为了Debug\nimage_crop_output_path = '/media/D/light-track/data/demo/crop'\nimage_seed_crop_output_path = '/media/D/light-track/data/demo/seed_crop'\ntracking_gt_info = []\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(ColoredHandler())\n\n\n################\n\ndef initialize_parameters():\n # global video_name, img_id\n\n global nms_method, nms_thresh, min_scores, min_box_size\n nms_method = 'nms'\n nms_thresh = 1.\n min_scores = 1e-10\n min_box_size = 0.\n\n global keyframe_interval, enlarge_scale, pose_matching_threshold\n keyframe_interval = 40 # choice examples: [2, 3, 5, 8, 10, 20, 40, 100, ....]\n\n enlarge_scale = 0.2 # how much to enlarge the bbox before pose estimation\n pose_matching_threshold = 0.5\n\n global flag_flip\n flag_flip = True\n\n global total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE\n global total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS\n total_time_POSE_ESTIMATOR = 0\n total_time_POSE_SIMILARITY = 0\n total_time_DET = 0\n total_time_ALL = 0\n total_time_ASSOCIATE = 0\n total_num_VIDEOS = 0\n total_num_FRAMES = 0\n total_num_PERSONS = 0\n\n \"\"\"test\"\"\"\n global filter_bbox_number, iou_alpha1, pose_alpha1\n filter_bbox_number = 0\n iou_alpha1 = 1.5\n pose_alpha1 = -0.95 # 求的是pose差异值,差异值越小表示越越相似。\n\n return\n\n\ndef light_track(pose_estimator,\n image_folder, output_json_path,\n visualize_folder, output_video_path, gt_info):\n global 
total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE\n global video_name, iou_alpha1, pose_alpha1\n global filter_bbox_number, total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS\n ''' 1. statistics: get total time for lighttrack processing'''\n st_time_total = time.time()\n ### hyper-papermet\n keypoints_number = 15\n interval = 5\n\n bbox_dets_list_list = []\n keypoints_list_list = []\n\n num_imgs = len(gt_info)\n\n first_img_id = 0\n\n start_from_labeled = False\n if start_from_labeled:\n first_img_id = find_first_labeled_opensvai_json(gt_info)\n\n next_id = 0 # track_id 从0开始算\n img_id = first_img_id\n total_num_FRAMES += num_imgs\n\n gt_frame_index_list = find_gt_frame_index_list(gt_info, interval=interval)\n while img_id < num_imgs:\n ## loop Initialization\n img_gt_info = gt_info[img_id]\n image_name, labeled, candidates_info = read_image_data_opensvai_json(img_gt_info)\n img_path = os.path.join(image_folder, image_name)\n\n bbox_dets_list = [] # keyframe: start from empty\n keypoints_list = [] # keyframe: start from empty\n prev_frame_img_id = max(0, img_id - first_img_id - 1)\n\n # 假如第一帧是gt帧,那么直接复制gt的结果,放到list_list中\n if start_from_labeled and img_id == first_img_id:\n num_dets = len(candidates_info)\n for det_id in range(num_dets):\n track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)\n # first帧直接使用\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"bbox\": bbox_det}\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"keypoints\": keypoints}\n bbox_dets_list.append(bbox_det_dict)\n keypoints_list.append(keypoints_dict)\n next_id = max(next_id, track_id)\n next_id += 1\n bbox_dets_list_list.append(bbox_dets_list)\n keypoints_list_list.append(keypoints_list)\n else:\n #### 持续跟踪,当img_id是gt帧的时候会将gt和预测的进行比较.\n logger.info(\"Tracing,img_id:{}\".format(img_id))\n candidates_total = []\n st_time_DET = time.time()\n candidates_from_detector = inference_yolov3(img_path)\n end_time_DET = time.time()\n total_time_DET += (end_time_DET - st_time_DET)\n\n candidates_from_prev = []\n\n bbox_list_prev_frame = []\n ''' 根据先前帧的信息补充框 '''\n if img_id > first_img_id:\n bbox_list_prev_frame = bbox_dets_list_list[prev_frame_img_id].copy()\n keypoints_list_prev_frame = keypoints_list_list[prev_frame_img_id].copy()\n num_prev_bbox = len(bbox_list_prev_frame)\n for prev_det_id in range(num_prev_bbox):\n # obtain bbox position and track id\n keypoints = keypoints_list_prev_frame[prev_det_id]['keypoints']\n bbox_det_next = get_bbox_from_keypoints(keypoints)\n if bbox_invalid(bbox_det_next):\n continue\n # xywh\n candidates_from_prev.append(bbox_det_next)\n\n ''' 拿到本帧全部的候选框 '''\n candidates_total = candidates_from_detector + candidates_from_prev\n num_candidate = len(candidates_total)\n ''' 使用关节点的置信度来作为bbox的置信度 '''\n candidates_dets = []\n for candidate_id in range(num_candidate):\n bbox_det = candidates_total[candidate_id]\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": candidate_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"bbox\": bbox_det}\n st_time_pose = time.time()\n keypoints = inference_keypoints(pose_estimator, bbox_det_dict)[0]['keypoints']\n end_time_pose = time.time()\n total_time_POSE_ESTIMATOR += (end_time_pose - st_time_pose)\n bbox_det_next = xywh_to_x1y1x2y2(bbox_det)\n score = sum(keypoints[2::3]) / keypoints_number\n # 不知道为什么他这个pose的置信度会高于1\n if 
bbox_invalid(bbox_det_next) or score < 0.7:\n filter_bbox_number += 1\n continue\n candidate_det = bbox_det_next + [score]\n candidates_dets.append(candidate_det)\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": candidate_id,\n \"imgpath\": img_path,\n \"track_id\": None,\n \"keypoints\": keypoints}\n\n bbox_dets_list.append(bbox_det_dict)\n keypoints_list.append(keypoints_dict)\n # 根据bbox的置信度来使用nms\n keep = py_cpu_nms(np.array(candidates_dets, dtype=np.float32), 0.5) if len(candidates_dets) > 0 else []\n\n candidates_total = np.array(candidates_total)[keep]\n t = bbox_dets_list.copy()\n k = keypoints_list.copy()\n # 筛选过后的\n bbox_dets_list = [t[i] for i in keep]\n keypoints_list = [k[i] for i in keep]\n \"\"\" Data association \"\"\"\n cur_det_number = len(candidates_total)\n prev_det_number = len(bbox_list_prev_frame)\n if img_id == first_img_id or prev_det_number == 0:\n for det_id, bbox_det_dict in enumerate(bbox_dets_list):\n keypoints_dict = keypoints_list[det_id]\n bbox_det_dict['det_id'] = det_id\n keypoints_dict['det_id'] = det_id\n track_id = next_id\n bbox_det_dict['track_id'] = track_id\n keypoints_dict['track_id'] = track_id\n next_id = max(next_id, track_id)\n next_id += 1\n else:\n scores = np.zeros((cur_det_number, prev_det_number))\n for det_id in range(cur_det_number):\n bbox_det_dict = bbox_dets_list[det_id]\n keypoints_dict = keypoints_list[det_id]\n bbox_det = bbox_det_dict['bbox']\n keypoints = keypoints_dict['keypoints']\n\n # 计算当前帧的bbox和先前帧bboxes的分数\n for prev_det_id in range(prev_det_number):\n prev_bbox_det_dict = bbox_list_prev_frame[prev_det_id]\n prev_keypoints_dict = keypoints_list_prev_frame[prev_det_id]\n iou_score = iou(bbox_det, prev_bbox_det_dict['bbox'], xyxy=False)\n if iou_score > 0.5:\n scores[det_id, prev_det_id] = iou_alpha1 * iou_score\n\n st_time_ass = time.time()\n bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,\n bbox_list_prev_frame, scores,\n keypoints_list, next_id)\n end_time_ass = time.time()\n total_time_ASSOCIATE += (end_time_ass - st_time_ass)\n\n next_id = now_next_id\n\n if len(bbox_dets_list) == 0:\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"bbox\": [0, 0, 2, 2]}\n bbox_dets_list.append(bbox_det_dict)\n\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"keypoints\": []}\n keypoints_list.append(keypoints_dict)\n\n bbox_dets_list_list.append(bbox_dets_list)\n keypoints_list_list.append(keypoints_list)\n ##########################################\n #### 如果是gt帧则会与预测帧的结果进行比较 ####\n ##########################################\n if img_id in gt_frame_index_list and gt_frame_index_list.index(img_id) >= 1:\n logger.info(\"type:{},img_id:{}\".format('gt_guide', img_id))\n # gt frame\n num_dets = len(candidates_info)\n\n bbox_list_prediction = bbox_dets_list_list[img_id - first_img_id].copy()\n keypoints_list_prediction = keypoints_list_list[img_id - first_img_id].copy()\n bbox_list_gt = []\n keypoints_list_gt = []\n for det_id in range(num_dets):\n track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"bbox\": bbox_det}\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": det_id,\n \"imgpath\": img_path,\n \"track_id\": track_id,\n \"keypoints\": keypoints}\n\n bbox_list_gt.append(bbox_det_dict)\n 
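# the ground-truth candidates collected here replace the predicted lists for this labeled frame and anchor the backward correction performed below\n 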
keypoints_list_gt.append(keypoints_dict)\n bbox_dets_list_list[img_id - first_img_id] = bbox_list_gt\n keypoints_list_list[img_id - first_img_id] = keypoints_list_gt\n need_correct = distance_between_gt_prediction(\n gt_dict={\"det\": bbox_list_gt, \"keypoints\": keypoints_list_gt},\n predict_dict={\"det\": bbox_list_prediction,\n \"keypoints\": keypoints_list_prediction})\n if need_correct:\n ## 往前进行矫正\n correct_index = img_id - 1\n correct_end_index = img_id - int(interval / 2)\n # 从后往前\n while correct_index >= correct_end_index:\n ## 假设框是对的,id错了\n ## 此时的prev_det_number 是gt\n bbox_dets_list = bbox_dets_list_list[correct_index - first_img_id]\n keypoints_list = keypoints_list_list[correct_index - first_img_id]\n\n prev_det_number = len(bbox_list_gt)\n cur_det_number = len(bbox_dets_list)\n # prev 是已完成匹配的,cur是待匹配的\n scores = np.zeros((cur_det_number, prev_det_number))\n for det_id in range(cur_det_number):\n bbox_det_dict = bbox_dets_list[det_id]\n keypoints_dict = keypoints_list[det_id]\n bbox_det = bbox_det_dict['bbox']\n keypoints = keypoints_dict['keypoints']\n\n # 计算当前帧的bbox和先前帧bboxes的分数\n for prev_det_id in range(prev_det_number):\n bbox_det_dict_gt = bbox_list_gt[prev_det_id]\n iou_score = iou(bbox_det, bbox_det_dict_gt['bbox'], xyxy=False)\n if iou_score > 0.2:\n scores[det_id, prev_det_id] = iou_alpha1 * iou_score\n\n if prev_det_number > 0 and cur_det_number > 0:\n bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,\n bbox_list_gt,\n scores,\n keypoints_list,\n next_id)\n\n # 这一帧没有一个保留下来的bbox\n if len(bbox_dets_list) == 0:\n bbox_det_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"bbox\": [0, 0, 2, 2]}\n bbox_dets_list.append(bbox_det_dict)\n\n keypoints_dict = {\"img_id\": img_id,\n \"det_id\": 0,\n \"track_id\": None,\n \"imgpath\": img_path,\n \"keypoints\": []}\n keypoints_list.append(keypoints_dict)\n bbox_dets_list_list[correct_index - first_img_id] = bbox_dets_list.copy()\n keypoints_list_list[correct_index - first_img_id] = keypoints_list.copy()\n correct_index -= 1\n\n img_id += 1\n\n ''' 1. 
statistics: get total time for lighttrack processing'''\n end_time_total = time.time()\n total_time_ALL += (end_time_total - st_time_total)\n\n # convert results into openSVAI format\n print(\"Exporting Results in openSVAI Standard Json Format...\")\n poses_standard = pose_to_standard_mot(keypoints_list_list, bbox_dets_list_list)\n # json_str = python_to_json(poses_standard)\n # print(json_str)\n\n # output json file\n pose_json_folder, _ = get_parent_folder_from_path(output_json_path)\n create_folder(pose_json_folder)\n write_json_to_file(poses_standard, output_json_path)\n print(\"Json Export Finished!\")\n\n # visualization\n if flag_visualize is True:\n print(\"Visualizing Pose Tracking Results...\")\n create_folder(visualize_folder)\n visualizer.show_all_from_standard_json(output_json_path, classes, joint_pairs, joint_names,\n image_folder,\n visualize_folder,\n flag_track=True)\n print(\"Visualization Finished!\")\n\n img_paths = get_immediate_childfile_paths(visualize_folder)\n avg_fps = total_num_FRAMES / total_time_ALL\n # make_video_from_images(img_paths, output_video_path, fps=avg_fps, size=None, is_color=True, format=\"XVID\")\n\n fps = 5 # 25 原来\n visualizer.make_video_from_images(img_paths, output_video_path, fps=fps, size=None, is_color=True,\n format=\"XVID\")\n\n\ndef distance_between_gt_prediction(gt_dict, predict_dict):\n \"\"\"\n 判断是否需要矫正\n :param gt_dict:\n :param predict_dict:\n :return:\n \"\"\"\n gt_det_list = gt_dict['det']\n gt_keypoints_list = gt_dict['keypoints']\n predict_det_list = predict_dict['det']\n predict_keypoints_list = predict_dict['keypoints']\n # TODO\n # for gt_det_id in gt_det_list:\n # gt_det = gt_det_list[gt_det_id]\n # gt_track_id = gt_det['track_id']\n # for predict_det_id in predict_det_list:\n # predict_det = predict_det_list[predict_det_id]\n # predict_track_id = predict_det['track_id']\n # if predict_track_id == gt_track_id:\n\n return True\n\n\ndef find_gt_frame_index_list(gt_info, interval=5):\n gt_index_list = []\n prev_gt_index = -1\n for index in range(len(gt_info)):\n if gt_info[index]['labeled'] is True and (len(gt_index_list) == 0 or (index - prev_gt_index) % interval == 0):\n prev_gt_index = index\n gt_index_list.append(index)\n return gt_index_list\n\n\ndef bipartite_graph_matching(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames,\n current_keypoints_dict_list, next_id):\n prev_to_cur_match = Kuhn_Munkras_match(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames)\n result_bbox_dict_list = []\n result_keypoints_dict_list = []\n det_number = 0\n assigned_cur_bbox = []\n for prev_index, cur_index in enumerate(prev_to_cur_match):\n if not np.isnan(cur_index):\n assigned_cur_bbox.append(cur_index)\n cur_index = int(cur_index)\n cur_bbox_dict = current_bbox_dict_list[cur_index]\n cur_keypoints_dict = current_keypoints_dict_list[cur_index]\n cur_bbox_dict['det_id'] = det_number\n cur_bbox_dict['track_id'] = prev_index\n cur_keypoints_dict['det_id'] = det_number\n cur_keypoints_dict['track_id'] = prev_index\n result_bbox_dict_list.append(cur_bbox_dict)\n result_keypoints_dict_list.append(cur_keypoints_dict)\n det_number += 1\n\n # 没有分配track_id的bbox,给其新的track_id\n for cur_index in range(len(current_bbox_dict_list)):\n if cur_index not in assigned_cur_bbox:\n cur_bbox_dict = current_bbox_dict_list[cur_index]\n cur_keypoints_dict = current_keypoints_dict_list[cur_index]\n cur_bbox_dict['det_id'] = det_number\n cur_bbox_dict['track_id'] = next_id\n cur_keypoints_dict['det_id'] = det_number\n 
cur_keypoints_dict['track_id'] = next_id\n result_bbox_dict_list.append(cur_bbox_dict)\n result_keypoints_dict_list.append(cur_keypoints_dict)\n det_number += 1\n next_id += 1\n\n return result_bbox_dict_list, result_keypoints_dict_list, next_id\n\n\ndef distance_between_two_boxs(boxA, boxB):\n x1, y1, _, _ = boxA\n x2, y2, _, _ = boxB\n distance = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))\n return distance\n\n\ndef get_track_id_SGCN(bbox_cur_frame, bbox_list_prev_frame, keypoints_cur_frame,\n keypoints_list_prev_frame):\n assert (len(bbox_list_prev_frame) == len(keypoints_list_prev_frame))\n\n min_index = None\n min_matching_score = sys.maxsize\n global pose_matching_threshold\n # if track_id is still not assigned, the person is really missing or track is really lost\n track_id = -1\n\n for det_index, bbox_det_dict in enumerate(bbox_list_prev_frame):\n bbox_prev_frame = bbox_det_dict[\"bbox\"]\n\n # check the pose matching score\n keypoints_dict = keypoints_list_prev_frame[det_index]\n keypoints_prev_frame = keypoints_dict[\"keypoints\"]\n pose_matching_score = get_pose_matching_score(keypoints_cur_frame, keypoints_prev_frame,\n bbox_cur_frame,\n bbox_prev_frame)\n\n if pose_matching_score <= pose_matching_threshold and pose_matching_score <= min_matching_score:\n # match the target based on the pose matching score\n min_matching_score = pose_matching_score\n min_index = det_index\n\n if min_index is None:\n return -1, None\n else:\n track_id = bbox_list_prev_frame[min_index][\"track_id\"]\n return track_id, min_index\n\n\ndef get_track_id_SpatialConsistency(bbox_cur_frame, bbox_list_prev_frame):\n \"\"\" 用当前帧的bbox,去找之前帧中的bboxes的IOU值最大bbox。\n\n 使用一个bbox去前一帧找IOU值最大的。\n\n \"\"\"\n\n thresh = 0.3\n max_iou_score = 0\n max_index = -1\n\n for bbox_index, bbox_det_dict in enumerate(bbox_list_prev_frame):\n bbox_prev_frame = bbox_det_dict[\"bbox\"]\n\n boxA = xywh_to_x1y1x2y2(bbox_cur_frame)\n boxB = xywh_to_x1y1x2y2(bbox_prev_frame)\n iou_score = iou(boxA, boxB)\n if iou_score > max_iou_score:\n max_iou_score = iou_score\n max_index = bbox_index\n\n if max_iou_score > thresh:\n track_id = bbox_list_prev_frame[max_index][\"track_id\"]\n return track_id, max_index\n else:\n return -1, None\n\n\ndef get_pose_matching_score(keypoints_A, keypoints_B, bbox_A, bbox_B):\n if keypoints_A == [] or keypoints_B == []:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n if bbox_invalid(bbox_A) or bbox_invalid(bbox_B):\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n graph_A, flag_pass_check = keypoints_to_graph(keypoints_A, bbox_A)\n if flag_pass_check is False:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n graph_B, flag_pass_check = keypoints_to_graph(keypoints_B, bbox_B)\n if flag_pass_check is False:\n print(\"graph not correctly generated!\")\n return sys.maxsize\n\n sample_graph_pair = (graph_A, graph_B)\n data_A, data_B = graph_pair_to_data(sample_graph_pair)\n\n start = time.time()\n flag_match, dist = pose_matching(data_A, data_B)\n end = time.time()\n return dist\n\n\ndef is_target_lost(keypoints, method=\"max_average\"):\n num_keypoints = int(len(keypoints) / 3.0)\n if method == \"average\":\n # pure average\n score = 0\n for i in range(num_keypoints):\n score += keypoints[3 * i + 2]\n score /= num_keypoints * 1.0\n print(\"target_score: {}\".format(score))\n elif method == \"max_average\":\n score_list = keypoints[2::3]\n score_list_sorted = sorted(score_list)\n top_N = 4\n assert (top_N < num_keypoints)\n top_scores = 
[score_list_sorted[-i] for i in range(1, top_N + 1)]\n score = sum(top_scores) / top_N\n if score < 0.6:\n return True\n else:\n return False\n\n\ndef py_cpu_nms(dets, thresh):\n \"\"\"Pure Python NMS baseline.\"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4] # bbox打分\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 打分从大到小排列,取index\n order = scores.argsort()[::-1]\n # keep为最后保留的边框\n keep = []\n while order.size > 0:\n # order[0]是当前分数最大的窗口,肯定保留\n i = order[0]\n keep.append(i)\n # 计算窗口i与其他所有窗口的交叠部分的面积\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # 交/并得到iou值\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # inds为所有与窗口i的iou值小于threshold值的窗口的index,其他窗口此次都被窗口i吸收\n inds = np.where(ovr <= thresh)[0]\n # order里面只保留与窗口i交叠面积小于threshold的那些窗口,由于ovr长度比order长度少1(不包含i),所以inds+1对应到保留的窗口\n order = order[inds + 1]\n\n return keep\n\n\ndef iou(boxA, boxB, xyxy=True):\n # box: (x1, y1, x2, y2)\n # determine the (x, y)-coordinates of the intersection rectangle\n if not xyxy:\n # 如果是xy wh那么要转换数据 - xy是最小坐标\n b1_x1, b1_x2 = boxA[0], boxA[0] + boxA[2]\n b1_y1, b1_y2 = boxA[1], boxA[1] + boxA[3]\n b2_x1, b2_x2 = boxB[0], boxB[0] + boxB[2]\n b2_y1, b2_y2 = boxB[1], boxB[1] + boxB[3]\n xA = max(b1_x1, b2_x1)\n yA = max(b1_y1, b2_y1)\n xB = min(b1_x2, b2_x2)\n yB = min(b1_y2, b2_y2)\n else:\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n # compute the area of both the prediction and ground-truth\n # rectangles\n if not xyxy:\n boxAArea = (boxA[2] + 1) * (boxA[3] + 1)\n boxBArea = (boxB[2] + 1) * (boxB[3] + 1)\n else:\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1) # w×h\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1) # w×h\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou\n\n\ndef get_bbox_from_keypoints(keypoints_python_data):\n if keypoints_python_data == [] or keypoints_python_data == 45 * [0]:\n return [0, 0, 2, 2]\n\n num_keypoints = len(keypoints_python_data)\n x_list = []\n y_list = []\n for keypoint_id in range(int(num_keypoints / 3)):\n x = keypoints_python_data[3 * keypoint_id]\n y = keypoints_python_data[3 * keypoint_id + 1]\n vis = keypoints_python_data[3 * keypoint_id + 2] # 是否可见\n if vis != 0 and vis != 3:\n x_list.append(x)\n y_list.append(y)\n min_x = min(x_list)\n min_y = min(y_list)\n max_x = max(x_list)\n max_y = max(y_list)\n\n if not x_list or not y_list:\n return [0, 0, 2, 2]\n\n scale = enlarge_scale # enlarge bbox by 20% with same center position\n bbox = enlarge_bbox([min_x, min_y, max_x, max_y], scale)\n bbox_in_xywh = x1y1x2y2_to_xywh(bbox)\n return bbox_in_xywh\n\n\ndef enlarge_bbox(bbox, scale):\n assert (scale > 0)\n min_x, min_y, max_x, max_y = bbox\n margin_x = int(0.5 * scale * (max_x - min_x))\n margin_y = int(0.5 * scale * (max_y - min_y))\n if margin_x < 0: margin_x = 2\n if margin_y < 0: margin_y = 2\n\n min_x -= margin_x\n max_x += margin_x\n min_y -= margin_y\n max_y += 
margin_y\n\n width = max_x - min_x\n height = max_y - min_y\n if max_y < 0 or max_x < 0 or width <= 0 or height <= 0 or width > 2000 or height > 2000:\n min_x = 0\n max_x = 2\n min_y = 0\n max_y = 2\n\n bbox_enlarged = [min_x, min_y, max_x, max_y]\n return bbox_enlarged\n\n\ndef inference_keypoints(pose_estimator, test_data):\n cls_dets = test_data[\"bbox\"]\n # nms on the bboxes\n if flag_nms is True:\n cls_dets, keep = apply_nms(cls_dets, nms_method, nms_thresh)\n test_data = np.asarray(test_data)[keep]\n if len(keep) == 0:\n return -1\n else:\n test_data = [test_data]\n\n # crop and detect pose\n pose_heatmaps, details, cls_skeleton, crops, start_id, end_id = get_pose_from_bbox(pose_estimator,\n test_data,\n cfg)\n # get keypoint positions from pose\n keypoints = get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id)\n # dump results\n results = prepare_results(test_data[0], keypoints, cls_dets)\n return results\n\n\ndef apply_nms(cls_dets, nms_method, nms_thresh):\n # nms and filter\n keep = np.where((cls_dets[:, 4] >= min_scores) &\n ((cls_dets[:, 3] - cls_dets[:, 1]) * (\n cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0]\n cls_dets = cls_dets[keep]\n if len(cls_dets) > 0:\n if nms_method == 'nms':\n keep = gpu_nms(cls_dets, nms_thresh)\n elif nms_method == 'soft':\n keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2)\n else:\n assert False\n cls_dets = cls_dets[keep]\n return cls_dets, keep\n\n\ndef get_pose_from_bbox(pose_estimator, test_data, cfg):\n cls_skeleton = np.zeros(\n (len(test_data), cfg.nr_skeleton, 3)) # cfg.nr_skeleton=joint number. size=number*3\n crops = np.zeros((len(test_data), 4))\n\n batch_size = 1\n start_id = 0\n end_id = min(len(test_data), batch_size)\n\n test_imgs = []\n details = []\n for i in range(start_id, end_id):\n test_img, detail = Preprocessing(test_data[i], stage='test')\n test_imgs.append(test_img)\n details.append(detail)\n\n details = np.asarray(details)\n feed = test_imgs\n for i in range(end_id - start_id):\n ori_img = test_imgs[i][0].transpose(1, 2, 0)\n if flag_flip == True:\n flip_img = cv2.flip(ori_img, 1)\n feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...])\n feed = np.vstack(feed)\n\n res = pose_estimator.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])[0]\n res = res.transpose(0, 3, 1, 2)\n\n if flag_flip == True:\n for i in range(end_id - start_id):\n fmp = res[end_id - start_id + i].transpose((1, 2, 0))\n fmp = cv2.flip(fmp, 1)\n fmp = list(fmp.transpose((2, 0, 1)))\n for (q, w) in cfg.symmetry:\n fmp[q], fmp[w] = fmp[w], fmp[q]\n fmp = np.array(fmp)\n res[i] += fmp\n res[i] /= 2\n\n pose_heatmaps = res\n return pose_heatmaps, details, cls_skeleton, crops, start_id, end_id\n\n\ndef get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id):\n res = pose_heatmaps\n for test_image_id in range(start_id, end_id):\n r0 = res[test_image_id - start_id].copy()\n r0 /= 255.\n r0 += 0.5\n\n for w in range(cfg.nr_skeleton):\n res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w])\n\n border = 10\n dr = np.zeros(\n (cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))\n dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy()\n\n for w in range(cfg.nr_skeleton):\n dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)\n\n for w in range(cfg.nr_skeleton):\n lb = dr[w].argmax()\n y, x = np.unravel_index(lb, dr[w].shape)\n dr[w, y, x] = 0\n lb = 
dr[w].argmax()\n py, px = np.unravel_index(lb, dr[w].shape)\n y -= border\n x -= border\n py -= border + y\n px -= border + x\n ln = (px ** 2 + py ** 2) ** 0.5\n delta = 0.25\n if ln > 1e-3:\n x += delta * px / ln\n y += delta * py / ln\n x = max(0, min(x, cfg.output_shape[1] - 1))\n y = max(0, min(y, cfg.output_shape[0] - 1))\n cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2)\n cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]\n\n # map back to original images\n crops[test_image_id, :] = details[test_image_id - start_id, :]\n for w in range(cfg.nr_skeleton):\n cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[\n 1] * (crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0]\n cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[\n 0] * (crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1]\n return cls_skeleton\n\n\ndef prepare_results(test_data, cls_skeleton, cls_dets):\n cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton)\n\n cls_scores = 1\n dump_results = []\n cls_skeleton = np.concatenate(\n [cls_skeleton.reshape(-1, cfg.nr_skeleton * 3),\n (cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]],\n axis=1)\n for i in range(len(cls_skeleton)):\n result = dict(image_id=test_data['img_id'],\n category_id=1,\n score=float(round(cls_skeleton[i][-1], 4)),\n keypoints=cls_skeleton[i][:-1].round(3).tolist())\n dump_results.append(result)\n return dump_results\n\n\ndef pose_to_standard_mot(keypoints_list_list, dets_list_list):\n openSVAI_python_data_list = []\n\n num_keypoints_list = len(keypoints_list_list)\n num_dets_list = len(dets_list_list)\n assert (num_keypoints_list == num_dets_list)\n\n for i in range(num_dets_list):\n\n dets_list = dets_list_list[i]\n keypoints_list = keypoints_list_list[i]\n\n if dets_list == []:\n continue\n img_path = dets_list[0][\"imgpath\"]\n img_folder_path = os.path.dirname(img_path)\n img_name = os.path.basename(img_path)\n img_info = {\"folder\": img_folder_path,\n \"name\": img_name,\n \"id\": [int(i)]}\n openSVAI_python_data = {\"image\": [], \"candidates\": []}\n openSVAI_python_data[\"image\"] = img_info\n\n num_dets = len(dets_list)\n num_keypoints = len(\n keypoints_list) # number of persons, not number of keypoints for each person\n candidate_list = []\n\n for j in range(num_dets):\n keypoints_dict = keypoints_list[j]\n dets_dict = dets_list[j]\n img_id = keypoints_dict[\"img_id\"]\n det_id = keypoints_dict[\"det_id\"]\n track_id = keypoints_dict[\"track_id\"]\n img_path = keypoints_dict[\"imgpath\"]\n\n bbox_dets_data = dets_list[det_id]\n det = dets_dict[\"bbox\"]\n if det == [0, 0, 2, 2]:\n # do not provide keypoints\n candidate = {\"det_bbox\": [0, 0, 2, 2],\n \"det_score\": 0}\n else:\n bbox_in_xywh = det[0:4]\n keypoints = keypoints_dict[\"keypoints\"]\n\n track_score = sum(keypoints[2::3]) / len(keypoints) / 3.0\n\n candidate = {\"det_bbox\": bbox_in_xywh,\n \"det_score\": 1,\n \"track_id\": track_id,\n \"track_score\": track_score,\n \"pose_keypoints_2d\": keypoints}\n candidate_list.append(candidate)\n openSVAI_python_data[\"candidates\"] = candidate_list\n openSVAI_python_data_list.append(openSVAI_python_data)\n return openSVAI_python_data_list\n\n\ndef x1y1x2y2_to_xywh(det):\n x1, y1, x2, y2 = det\n w, h = int(x2) - int(x1), int(y2) - int(y1)\n return [x1, y1, w, h]\n\n\ndef xywh_to_x1y1x2y2(det):\n x1, y1, w, h = det\n x2, y2 = x1 + w, y1 + h\n return [x1, 
y1, x2, y2]\n\n\ndef bbox_invalid(bbox):\n if bbox == [0, 0, 2, 2]:\n return True\n if bbox[2] <= 0 or bbox[3] <= 0 or bbox[2] > 2000 or bbox[3] > 2000:\n return True\n return False\n\n\ndef non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n \"\"\"\n Removes detections with lower object confidence score than 'conf_thres' and performs\n Non-Maximum Suppression to further filter detections.\n Returns detections with shape:\n (x1, y1, x2, y2, object_conf, class_score, class_pred)\n \"\"\"\n # prediction [image_number,]\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # 前四位\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0] # 1是维度\n # Sort by it\n image_pred = image_pred[(-score).argsort()] # 将image_pred根据score排序,score越大的预测,排在越前面。\n class_preds = image_pred[:, 5:].max(1, keepdim=True)[1].float() # keepdim=True shape : [...,1]\n detections = torch.cat((image_pred[:, :5], class_preds), 1) # 按列拼,直接拼成它的第5个值。\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n # 所有的候选跟置信度最大的比较(也会和它自己比较)\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n return output\n\n\nif __name__ == '__main__':\n\n global args\n parser = argparse.ArgumentParser()\n # parser.add_argument('--video_path', '-v', type=str, dest='video_path',\n # # default=\"data/demo/video.mp4\")\n # default=\"data/demo/0003.m4\")\n # parser.add_argument('--images_path', '-i', type=str, dest='images_path',\n # default=\"data/demo/mpii-video-pose/0001\")\n # parser.add_argument('--model', '-m', type=str, dest='test_model',\n # default=\"weights/mobile-deconv/snapshot_296.ckpt\")\n # parser.add_argument('--model', '-m', type=str, dest='test_model',\n # default=\"weights/CPN101/CPN_snapshot_293.ckpt\")\n parser.add_argument('--model', '-m', type=str, dest='test_model',\n default=\"weights/MSRA152/MSRA_snapshot_285.ckpt\")\n # default=\"weights/mobile-deconv/snapshot_296.ckpt\")\n parser.add_argument('--train', type=bool, dest='train',\n default=True)\n # parser.add_argument('--exp_number', type=str, dest='exp_number', default='2017-val',\n # help='number of experiment')\n parser.add_argument('--exp_number', type=str, dest='exp_number', default='test_one_video',\n help='number of experiment')\n args = parser.parse_args()\n args.bbox_thresh = 0.4\n\n # initialize pose estimator\n initialize_parameters()\n pose_estimator = Tester(Network(), cfg)\n pose_estimator.load_weights(args.test_model)\n\n train = args.train\n exp_number = args.exp_number\n\n ##################################\n test_one_video = True\n exp_number = \"test_one_video_MSRA152_guide\"\n val = False\n # exp_number = 
\"2017-val-iou-{}-pose{}-together-MSRA152\".format(iou_alpha1, pose_alpha1)\n # exp_number = \"2017-val-iou-{}-together-MSRA152-guide\".format(iou_alpha1)\n test = False\n # exp_number = \"2017-test-iou-pose-together\"\n experiment_output_root = '/media/F'\n visualize_root_folder = \"{}/exp_{}/visualize\".format(experiment_output_root, exp_number)\n output_video_folder = \"{}/exp_{}/videos\".format(experiment_output_root, exp_number)\n output_json_folder = \"{}/exp_{}/jsons\".format(experiment_output_root, exp_number)\n evaluation_folder = \"{}/exp_{}/evaluation\".format(experiment_output_root, exp_number)\n logger_file_foler = \"{}/exp_{}/log\".format(experiment_output_root, exp_number)\n\n create_folder(output_video_folder)\n create_folder(output_json_folder)\n create_folder(logger_file_foler)\n\n create_folder(evaluation_folder)\n create_folder(os.path.join(evaluation_folder, \"annotations\", \"val\"))\n create_folder(os.path.join(evaluation_folder, \"out\"))\n create_folder(os.path.join(evaluation_folder, \"posetrack_results\"))\n ## save log file\n logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n level=logging.DEBUG,\n filename=os.path.join(logger_file_foler, 'experiment.log'),\n filemode='a')\n ####################################\n\n logger.info(\" test_one_video:{} val:{} test:{} \".format(test_one_video, val, test))\n\n \"\"\" 每个JSON文件为一个视频,读取一个个的JSON文件,产生结果 \"\"\"\n if test_one_video:\n numbers = ['24642', '24635', '23699', '23695', '23484', '23471', '23416', '22682', '22671', '22642', '22124',\n '00043', '00096']\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n numbers = ['23699']\n input_jsons = [\"/media/D/DataSet/PoseTrack2017/train/{}_mpii_relpath_5sec_trainsub_OpenSVAI.json\".format(number)\n for\n number in numbers]\n frame_number = 0\n videos_number = len(input_jsons)\n for input_json in tqdm(input_jsons):\n videos_json_data, videos_number = read_opensvai_json(input_json)\n for video_seq_id in range(videos_number):\n video_json_data = videos_json_data[video_seq_id]\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n\n \"\"\" The PoseTrack2017 validation set \"\"\"\n if val:\n input_jsons_folder = \"/media/D/DataSet/PoseTrack2017/val/\"\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n val_jsons = os.listdir(input_jsons_folder)\n frame_number = 0\n videos_number = len(val_jsons)\n for json in val_jsons:\n videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))\n assert len(videos_json_data) == 1\n video_json_data = videos_json_data[0]\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = 
os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n \"\"\" The PoseTrack2017 test set \"\"\"\n if test:\n input_jsons_folder = \"/media/D/DataSet/PoseTrack2017/test/\"\n posetrack_dataset_path = \"/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data\"\n val_jsons = os.listdir(input_jsons_folder)\n frame_number = 0\n videos_number = len(val_jsons)\n for json in val_jsons:\n videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))\n assert len(videos_json_data) == 1\n video_json_data = videos_json_data['annolist']\n video_path, video_name = read_video_data_opensvai_json(video_json_data)\n image_folder = os.path.join(posetrack_dataset_path, video_path)\n visualize_folder = os.path.join(visualize_root_folder, video_name)\n output_video_path = os.path.join(output_video_folder, \"{}_out.mp4\".format(video_name))\n output_json_path = os.path.join(output_json_folder, \"{}.json\".format(video_name))\n create_folder(visualize_folder)\n frame_number += len(video_json_data)\n light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,\n video_json_data)\n\n logger.info(\"videos_number:{}\".format(videos_number))\n logger.info(\"frames_number:{}\".format(frame_number))\n\n ''' Display statistics '''\n logger.info(\"total_time_ALL: {:.2f}s\".format(total_time_ALL))\n logger.info(\"total_time_DET: {:.2f}s\".format(total_time_DET))\n logger.info(\"total_time_POSE_ESTIMATOR: {:.2f}s\".format(total_time_POSE_ESTIMATOR))\n logger.info(\"total_time_POSE_SIMILARITY: {:.2f}s\".format(total_time_POSE_SIMILARITY))\n logger.info(\"total_time_ASSOCIATE: {:.2f}s\".format(total_time_ASSOCIATE))\n logger.info(\"total_time_LIGHTTRACK: {:.2f}s\".format(\n total_time_ALL - total_time_DET - total_time_POSE_ESTIMATOR - total_time_POSE_SIMILARITY - total_time_ASSOCIATE))\n logger.info(\"filter_bbox_number:{}\".format(filter_bbox_number))\n" ]
[ [ "numpy.amax", "numpy.unravel_index", "numpy.maximum", "numpy.minimum", "numpy.asarray", "numpy.isnan", "numpy.ascontiguousarray", "numpy.zeros", "numpy.array", "numpy.where", "numpy.vstack" ], [ "numpy.amax", "numpy.unravel_index", "numpy.maximum", "numpy.minimum", "numpy.asarray", "numpy.isnan", "numpy.ascontiguousarray", "numpy.zeros", "numpy.array", "numpy.where", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pdh930105/overhaul-distillation
[ "8f218c3faadd7b97006206cdc31711471ad5e309" ]
[ "CIFAR-100/train_with_distillation.py" ]
[ "from __future__ import print_function\n\nimport os\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport argparse\n\nimport distiller\nimport load_settings\n\nparser = argparse.ArgumentParser(description='CIFAR-100 training')\nparser.add_argument('--data_path', type=str, default='../data')\nparser.add_argument('--paper_setting', default='a', type=str)\nparser.add_argument('--epochs', default=200, type=int, help='number of total epochs to run')\nparser.add_argument('--batch_size', default=128, type=int, help='mini-batch size (default: 256)')\nparser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')\nargs = parser.parse_args()\n\ngpu_num = 0\nuse_cuda = torch.cuda.is_available()\n# data normalize\ntransform_train = transforms.Compose([\n transforms.Pad(4, padding_mode='reflect'),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32),\n transforms.ToTensor(),\n transforms.Normalize(np.array([125.3, 123.0, 113.9]) / 255.0,\n np.array([63.0, 62.1, 66.7]) / 255.0)\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(np.array([125.3, 123.0, 113.9]) / 255.0,\n np.array([63.0, 62.1, 66.7]) / 255.0),\n])\n\ntrainset = torchvision.datasets.CIFAR100(root=args.data_path, train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)\ntestset = torchvision.datasets.CIFAR100(root=args.data_path, train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n\n# Model load_paper_setttings 를 통해 불러오기\nt_net, s_net, args = load_settings.load_paper_settings(args)\n\n# Module for distillation\nd_net = distiller.Distiller(t_net, s_net)\n\nprint('the number of teacher model parameters: {}'.format(sum([p.data.nelement() for p in t_net.parameters()])))\nprint('the number of student model parameters: {}'.format(sum([p.data.nelement() for p in s_net.parameters()])))\n\nif use_cuda:\n torch.cuda.set_device(0)\n d_net.cuda()\n s_net.cuda()\n t_net.cuda()\n cudnn.benchmark = True\n\ncriterion_CE = nn.CrossEntropyLoss()\n\n# Training\ndef train_with_distill(d_net, epoch):\n epoch_start_time = time.time()\n print('\\nDistillation epoch: %d' % epoch)\n # Distiller class 를 사용해 feature distillation 하는 과정\n # 학습 시에는 Distiller class 에 있는 Connector를 사용해 feature distillation을 통한 학습을 진행한다.\n # d_net에서 학습되는 파라미터는 d_net의 Connecter와 d_net.s_net(student network)이다. (optimizer 확인)\n \n d_net.train()\n d_net.s_net.train()\n d_net.t_net.train()\n\n train_loss = 0\n correct = 0\n total = 0\n\n # global로 선언한 optimizer를 불러와 작업\n global optimizer\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n optimizer.zero_grad()\n\n batch_size = inputs.shape[0]\n outputs, loss_distill = d_net(inputs)\n loss_CE = criterion_CE(outputs, targets)\n\n # mini-batch를 통한 loss 계산. 
논문의 수식 (6)에 있는 a = 1/1000 으로 지정.\n loss = loss_CE + loss_distill.sum() / batch_size / 1000\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss_CE.item()\n\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().float().item()\n\n b_idx = batch_idx\n\n print('Train \\t Time Taken: %.2f sec' % (time.time() - epoch_start_time))\n print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss / (b_idx + 1), 100. * correct / total, correct, total))\n\n return train_loss / (b_idx + 1)\n\ndef test(net):\n epoch_start_time = time.time()\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = criterion_CE(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum().float().item()\n b_idx = batch_idx\n\n print('Test \\t Time Taken: %.2f sec' % (time.time() - epoch_start_time))\n print('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss / (b_idx + 1), 100. * correct / total, correct, total))\n return test_loss / (b_idx + 1), correct / total\n\n\nprint('Performance of teacher network')\ntest(t_net)\n\n# epoch에 따라 optimizer learning rate 변화하게 하기 위해 아래와 같이 디자인\nfor epoch in range(args.epochs):\n if epoch == 0:\n optimizer = optim.SGD([{'params': s_net.parameters()}, {'params': d_net.Connectors.parameters()}],\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)\n elif epoch == (args.epochs // 2):\n optimizer = optim.SGD([{'params': s_net.parameters()}, {'params': d_net.Connectors.parameters()}],\n lr=args.lr / 10, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)\n elif epoch == (args.epochs * 3 // 4):\n optimizer = optim.SGD([{'params': s_net.parameters()}, {'params': d_net.Connectors.parameters()}],\n lr=args.lr / 100, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)\n\n train_loss = train_with_distill(d_net, epoch)\n test_loss, accuracy = test(s_net)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.cuda.set_device", "torch.utils.data.DataLoader", "torch.cuda.is_available", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mlists/Mapping
[ "33dfbebd50463fc37a6367ce0f05ba254e870406" ]
[ "vector.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nfig, ax = plt.subplots()\n\nX, Y = np.meshgrid(np.arange(15), np.arange(10))\nU = V = np.ones_like(X)\nphi = (np.random.rand(15, 10) - .5) * 150\nax.quiver(X, Y, U, V, angles=phi)\nprint(phi)\nplt.show()\n\n" ]
[ [ "numpy.ones_like", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.random.rand", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NeilKulikov/fourier
[ "d5aa7801628168115133a68aa9f358d782600983" ]
[ "proto/bench.py" ]
[ "#!/usr/bin/env python3\n\nimport ctypes\n\nbasic_dll = ctypes.CDLL('fourier_basic.so')\n\nbasic_dll.density_fourier_capi_float.restype = ctypes.c_int\nbasic_dll.density_fourier_capi_float.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_long, ctypes.c_float, ctypes.c_float]\nbasic_dll.density_fourier_capi_double.restype = ctypes.c_int\nbasic_dll.density_fourier_capi_double.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_long, ctypes.c_double, ctypes.c_double]\n\nimport numpy as np\n\ndef density_fourier_float(data: np.array, hcount = 32):\n fp_data = np.ascontiguousarray(data, dtype = np.float32)\n scount = len(fp_data)\n re_harm, im_harm = np.zeros(hcount, dtype = np.float32), np.zeros(hcount, dtype = np.float32) \n dmin, dmax = np.min(data), np.max(data)\n shift, basek = 0.5 * (dmax + dmin), 2 * np.pi / np.abs(dmax - dmin)\n res = basic_dll.density_fourier_capi_float( \\\n fp_data.ctypes.data_as(ctypes.c_void_p), \\\n re_harm.ctypes.data_as(ctypes.c_void_p), \\\n im_harm.ctypes.data_as(ctypes.c_void_p), \\\n scount, hcount, shift, basek)\n assert res == 0\n return (re_harm, im_harm)\n\ndef density_fourier_double(data: np.array, hcount = 32):\n fp_data = np.ascontiguousarray(data, dtype = np.float64)\n scount = len(fp_data)\n re_harm, im_harm = np.zeros(hcount, dtype = np.float64), np.zeros(hcount, dtype = np.float64) \n dmin, dmax = np.min(data), np.max(data)\n shift, basek = 0.5 * (dmax + dmin), 2 * np.pi / np.abs(dmax - dmin)\n res = basic_dll.density_fourier_capi_double( \\\n fp_data.ctypes.data_as(ctypes.c_void_p), \\\n re_harm.ctypes.data_as(ctypes.c_void_p), \\\n im_harm.ctypes.data_as(ctypes.c_void_p), \\\n scount, hcount, shift, basek)\n assert res == 0\n return (re_harm, im_harm)\n\ndef density_fourier(data: np.array, hcount = 16):\n if data.dtype == np.float32:\n return density_fourier_float(data, hcount)\n if data.dtype == np.float64:\n return density_fourier_double(data, hcount)\n return None\n\nnx = 80000000\nxs = 2 * np.random.rand(nx) - 1\nys = np.random.rand(nx)\nfs = np.exp(-4 *(xs-0.9)**2) + np.exp(-100*(xs+0.9)**2) + np.exp(-50*(xs+0.3)**2)\nzs = xs[ys < fs]\nzsf, zsd = zs.astype(dtype = np.float32), zs.astype(dtype = np.float64) \nnz = zs.shape[0]\n\nbasic_dll.evaluate_fourier_capi_float.restype = ctypes.c_float\nbasic_dll.evaluate_fourier_capi_float.argtypes = [ctypes.c_float, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_float, ctypes.c_float]\nbasic_dll.evaluate_fourier_capi_double.restype = ctypes.c_double\nbasic_dll.evaluate_fourier_capi_double.argtypes = [ctypes.c_double, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long, ctypes.c_double, ctypes.c_double]\n\ndef evaluate_fourier_float(arg: float, \n reharmonics: np.array, imharmonics: np.array, \n shift = 0.0, basek = np.pi) -> float:\n assert (imharmonics.ndim == 1) and (reharmonics.ndim == 1)\n assert imharmonics.shape == reharmonics.shape\n reh = np.ascontiguousarray(reharmonics, dtype = np.float32)\n imh = np.ascontiguousarray(imharmonics, dtype = np.float32)\n hcount = len(imh)\n return basic_dll.evaluate_fourier_capi_float( \\\n arg, \\\n reh.ctypes.data_as(ctypes.c_void_p), \\\n imh.ctypes.data_as(ctypes.c_void_p), \\\n hcount, shift, basek) / reharmonics[0]\n\ndef evaluate_fourier_double(arg: float, \n reharmonics: np.array, imharmonics: np.array, \n shift = 0.0, basek = np.pi) -> float:\n assert (imharmonics.ndim == 1) and (reharmonics.ndim == 1)\n assert imharmonics.shape == reharmonics.shape\n reh = 
np.ascontiguousarray(reharmonics, dtype = np.float64)\n imh = np.ascontiguousarray(imharmonics, dtype = np.float64)\n hcount = len(imh)\n return basic_dll.evaluate_fourier_capi_double( \\\n arg, \\\n reh.ctypes.data_as(ctypes.c_void_p), \\\n imh.ctypes.data_as(ctypes.c_void_p), \\\n hcount, shift, basek) / reharmonics[0]\n\ndef evaluate_fourier(arg: float, \n reharmonics: np.array, imharmonics: np.array, \n shift = 0.0, basek = np.pi):\n assert imharmonics.dtype == reharmonics.dtype\n if (imharmonics.dtype == np.float32) and (reharmonics.dtype == np.float32):\n return evaluate_fourier_float(arg, reharmonics, imharmonics, shift, basek)\n if (imharmonics.dtype == np.float64) and (reharmonics.dtype == np.float64):\n return evaluate_fourier_double(arg, reharmonics, imharmonics, shift, basek)\n return None\n\nfrom time import *\n\ndef time_it(func):\n s = perf_counter()\n func()\n e = perf_counter()\n return (e - s)\n\ndef box_filter(func, iters = 32, bs = (0.25, 0.5)):\n times = np.array([time_it(func) for i in range(iters)])\n times = np.sort(times)\n ifrom, ito = int(iters * bs[0]), int(iters * bs[1])\n filtered = times[ifrom:ito]\n return np.mean(filtered), np.std(filtered)\n\ninput(\"Any key...\")\nfuncf = lambda: density_fourier(zsf)\nresf, devf = box_filter(funcf)\nprint(resf, devf)" ]
[ [ "numpy.abs", "numpy.min", "numpy.ascontiguousarray", "numpy.sort", "numpy.max", "numpy.std", "numpy.mean", "numpy.random.rand", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
matthiasdiener/mirgecom
[ "4fb879023ec124047be9f3001485c69a8f4660c6", "4fb879023ec124047be9f3001485c69a8f4660c6" ]
[ "test/test_eos.py", "test/test_init.py" ]
[ "\"\"\"Test the EOS interfaces.\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2020 University of Illinois Board of Trustees\n\"\"\"\n\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport logging\nimport numpy as np\nimport numpy.linalg as la # noqa\nimport pyopencl as cl\nimport pyopencl.clrandom\nimport pyopencl.clmath\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\nfrom meshmode.array_context import PyOpenCLArrayContext\nfrom meshmode.dof_array import thaw\n\nfrom mirgecom.eos import IdealSingleGas\nfrom mirgecom.initializers import Vortex2D\nfrom mirgecom.initializers import Lump\nfrom mirgecom.euler import split_conserved\nfrom grudge.eager import EagerDGDiscretization\nfrom pyopencl.tools import ( # noqa\n pytest_generate_tests_for_pyopencl as pytest_generate_tests,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_idealsingle_lump(ctx_factory):\n \"\"\"Test EOS with mass lump.\n\n Tests that the IdealSingleGas EOS returns\n the correct (uniform) pressure for the Lump\n solution field.\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n\n dim = 2\n nel_1d = 4\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim\n )\n\n order = 3\n logger.info(f\"Number of elements {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n\n # Init soln with Vortex\n center = np.zeros(shape=(dim,))\n velocity = np.zeros(shape=(dim,))\n center[0] = 5\n velocity[0] = 1\n lump = Lump(center=center, velocity=velocity)\n eos = IdealSingleGas()\n lump_soln = lump(0, nodes)\n\n cv = split_conserved(dim, lump_soln)\n p = eos.pressure(cv)\n exp_p = 1.0\n errmax = discr.norm(p - exp_p, np.inf)\n\n exp_ke = 0.5 * cv.mass\n ke = eos.kinetic_energy(cv)\n kerr = discr.norm(ke - exp_ke, np.inf)\n\n te = eos.total_energy(cv, p)\n terr = discr.norm(te - cv.energy, np.inf)\n\n logger.info(f\"lump_soln = {lump_soln}\")\n logger.info(f\"pressure = {p}\")\n\n assert errmax < 1e-15\n assert kerr < 1e-15\n assert terr < 1e-15\n\n\ndef test_idealsingle_vortex(ctx_factory):\n r\"\"\"Test EOS with isentropic vortex.\n\n Tests that the IdealSingleGas EOS returns\n the correct pressure (p) for the Vortex2D solution\n field (i.e. 
:math:'p = \\rho^{\\gamma}').\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n\n dim = 2\n nel_1d = 4\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim\n )\n\n order = 3\n logger.info(f\"Number of elements {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n eos = IdealSingleGas()\n # Init soln with Vortex\n vortex = Vortex2D()\n vortex_soln = vortex(0, nodes)\n cv = split_conserved(dim, vortex_soln)\n gamma = eos.gamma()\n p = eos.pressure(cv)\n exp_p = cv.mass ** gamma\n errmax = discr.norm(p - exp_p, np.inf)\n\n exp_ke = 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass\n ke = eos.kinetic_energy(cv)\n kerr = discr.norm(ke - exp_ke, np.inf)\n\n te = eos.total_energy(cv, p)\n terr = discr.norm(te - cv.energy, np.inf)\n\n logger.info(f\"vortex_soln = {vortex_soln}\")\n logger.info(f\"pressure = {p}\")\n\n assert errmax < 1e-15\n assert kerr < 1e-15\n assert terr < 1e-15\n", "__copyright__ = \"\"\"\nCopyright (C) 2020 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport logging\nimport numpy as np\nimport numpy.linalg as la # noqa\nimport pyopencl as cl\nimport pyopencl.clrandom\nimport pyopencl.clmath\nimport pytest\n\nfrom meshmode.array_context import PyOpenCLArrayContext\nfrom meshmode.dof_array import thaw\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\n\nfrom mirgecom.initializers import Vortex2D\nfrom mirgecom.initializers import Lump\nfrom mirgecom.euler import split_conserved\nfrom mirgecom.initializers import SodShock1D\nfrom mirgecom.eos import IdealSingleGas\n\nfrom grudge.eager import EagerDGDiscretization\nfrom pyopencl.tools import ( # noqa\n pytest_generate_tests_for_pyopencl as pytest_generate_tests,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_lump_init(ctx_factory):\n \"\"\"\n Simple test to check that Lump initializer\n creates the expected solution field.\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n dim = 2\n nel_1d = 4\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim\n )\n\n order = 3\n logger.info(f\"Number of elements: {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n\n # Init soln with Vortex\n center = np.zeros(shape=(dim,))\n velocity = np.zeros(shape=(dim,))\n center[0] = 5\n velocity[0] = 1\n lump = Lump(center=center, velocity=velocity)\n lump_soln = lump(0, nodes)\n\n cv = split_conserved(dim, lump_soln)\n p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)\n exp_p = 1.0\n errmax = discr.norm(p - exp_p, np.inf)\n\n logger.info(f\"lump_soln = {lump_soln}\")\n logger.info(f\"pressure = {p}\")\n\n assert errmax < 1e-15\n\n\ndef test_vortex_init(ctx_factory):\n \"\"\"\n Simple test to check that Vortex2D initializer\n creates the expected solution field.\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n dim = 2\n nel_1d = 4\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim\n )\n\n order = 3\n logger.info(f\"Number of elements: {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n\n # Init soln with Vortex\n vortex = Vortex2D()\n vortex_soln = vortex(0, nodes)\n gamma = 1.4\n cv = split_conserved(dim, vortex_soln)\n p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)\n exp_p = cv.mass ** gamma\n errmax = discr.norm(p - exp_p, np.inf)\n\n logger.info(f\"vortex_soln = {vortex_soln}\")\n logger.info(f\"pressure = {p}\")\n\n assert errmax < 1e-15\n\n\ndef test_shock_init(ctx_factory):\n \"\"\"\n Simple test to check that Shock1D initializer\n creates the expected solution field.\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n\n nel_1d = 10\n dim = 2\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=[(0.0,), (1.0,)], b=[(-0.5,), (0.5,)], n=(nel_1d,) * dim\n )\n\n order = 3\n print(f\"Number of elements: 
{mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n\n initr = SodShock1D()\n initsoln = initr(t=0.0, x_vec=nodes)\n print(\"Sod Soln:\", initsoln)\n xpl = 1.0\n xpr = 0.1\n tol = 1e-15\n nodes_x = nodes[0]\n eos = IdealSingleGas()\n cv = split_conserved(dim, initsoln)\n p = eos.pressure(cv)\n\n assert discr.norm(actx.np.where(nodes_x < 0.5, p-xpl, p-xpr), np.inf) < tol\n\n\[email protected](\"dim\", [1, 2, 3])\ndef test_uniform(ctx_factory, dim):\n \"\"\"\n Simple test to check that Uniform initializer\n creates the expected solution field.\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n\n nel_1d = 2\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim\n )\n\n order = 1\n print(f\"Number of elements: {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n print(f\"DIM = {dim}, {len(nodes)}\")\n print(f\"Nodes={nodes}\")\n\n from mirgecom.initializers import Uniform\n initr = Uniform(numdim=dim)\n initsoln = initr(t=0.0, x_vec=nodes)\n tol = 1e-15\n ssoln = split_conserved(dim, initsoln)\n\n assert discr.norm(ssoln.mass - 1.0, np.inf) < tol\n assert discr.norm(ssoln.energy - 2.5, np.inf) < tol\n\n print(f\"Uniform Soln:{initsoln}\")\n eos = IdealSingleGas()\n cv = split_conserved(dim, initsoln)\n p = eos.pressure(cv)\n print(f\"Press:{p}\")\n\n assert discr.norm(p - 1.0, np.inf) < tol\n\n\[email protected](\"dim\", [1, 2, 3])\ndef test_pulse(ctx_factory, dim):\n \"\"\"\n Test of Gaussian pulse generator.\n If it looks, walks, and quacks like a duck, then ...\n \"\"\"\n cl_ctx = ctx_factory()\n queue = cl.CommandQueue(cl_ctx)\n actx = PyOpenCLArrayContext(queue)\n\n nel_1d = 10\n\n from meshmode.mesh.generation import generate_regular_rect_mesh\n\n mesh = generate_regular_rect_mesh(\n a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim\n )\n\n order = 1\n print(f\"Number of elements: {mesh.nelements}\")\n\n discr = EagerDGDiscretization(actx, mesh, order=order)\n nodes = thaw(actx, discr.nodes())\n print(f\"DIM = {dim}, {len(nodes)}\")\n print(f\"Nodes={nodes}\")\n\n tol = 1e-15\n from mirgecom.initializers import _make_pulse\n amp = 1.0\n w = .1\n rms2 = w * w\n r0 = np.zeros(dim)\n r2 = np.dot(nodes, nodes) / rms2\n pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)\n print(f\"Pulse = {pulse}\")\n\n # does it return the expected exponential?\n pulse_check = actx.np.exp(-.5 * r2)\n print(f\"exact: {pulse_check}\")\n pulse_resid = pulse - pulse_check\n print(f\"pulse residual: {pulse_resid}\")\n assert(discr.norm(pulse_resid, np.inf) < tol)\n\n # proper scaling with amplitude?\n amp = 2.0\n pulse = 0\n pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)\n pulse_resid = pulse - (pulse_check + pulse_check)\n assert(discr.norm(pulse_resid, np.inf) < tol)\n\n # proper scaling with r?\n amp = 1.0\n rcheck = np.sqrt(2.0) * nodes\n pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)\n assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)\n\n # proper scaling with w?\n w = w / np.sqrt(2.0)\n pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)\n assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)\n" ]
[ [ "numpy.dot", "numpy.zeros" ], [ "numpy.dot", "numpy.zeros", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
beabevi/ESAN
[ "35dfc021f2a5c0d85c2a2cee4bcd6b4345b96d8f" ]
[ "main.py" ]
[ "import argparse\nimport multiprocessing as mp\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport wandb\nfrom ogb.graphproppred import Evaluator\n\n# noinspection PyUnresolvedReferences\nfrom data import SubgraphData\nfrom utils import get_data, get_model, SimpleEvaluator, NonBinaryEvaluator, Evaluator\n\ntorch.set_num_threads(1)\n\n\ndef train(model, device, loader, optimizer, criterion, epoch, fold_idx):\n model.train()\n\n for step, batch in enumerate(loader):\n batch = batch.to(device)\n\n if batch.x.shape[0] == 1 or batch.batch[-1] == 0:\n pass\n else:\n pred = model(batch)\n optimizer.zero_grad()\n # ignore nan targets (unlabeled) when computing training loss.\n is_labeled = batch.y == batch.y\n\n y = batch.y.view(pred.shape).to(torch.float32) if pred.size(-1) == 1 else batch.y\n loss = criterion(pred.to(torch.float32)[is_labeled], y[is_labeled])\n\n wandb.log({f'Loss/train': loss.item()})\n loss.backward()\n optimizer.step()\n\n\ndef eval(model, device, loader, evaluator, voting_times=1):\n model.eval()\n\n all_y_pred = []\n for i in range(voting_times):\n y_true = []\n y_pred = []\n\n for step, batch in enumerate(loader):\n batch = batch.to(device)\n\n if batch.x.shape[0] == 1:\n pass\n else:\n with torch.no_grad():\n pred = model(batch)\n\n y = batch.y.view(pred.shape) if pred.size(-1) == 1 else batch.y\n y_true.append(y.detach().cpu())\n y_pred.append(pred.detach().cpu())\n\n all_y_pred.append(torch.cat(y_pred, dim=0).unsqueeze(-1).numpy())\n\n y_true = torch.cat(y_true, dim=0).numpy()\n input_dict = {\"y_true\": y_true, \"y_pred\": all_y_pred}\n return evaluator.eval(input_dict)\n\n\ndef reset_wandb_env():\n exclude = {\n \"WANDB_PROJECT\",\n \"WANDB_ENTITY\",\n \"WANDB_API_KEY\",\n }\n for k, v in os.environ.items():\n if k.startswith(\"WANDB_\") and k not in exclude:\n del os.environ[k]\n\n\ndef run(args, device, fold_idx, sweep_run_name, sweep_id, results_queue):\n # set seed\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n reset_wandb_env()\n run_name = \"{}-{}\".format(sweep_run_name, fold_idx)\n run = wandb.init(\n group=sweep_id,\n job_type=sweep_run_name,\n name=run_name,\n config=args,\n )\n\n train_loader, train_loader_eval, valid_loader, test_loader, attributes = get_data(args, fold_idx)\n in_dim, out_dim, task_type, eval_metric = attributes\n\n if 'ogb' in args.dataset:\n evaluator = Evaluator(args.dataset)\n else:\n evaluator = SimpleEvaluator(task_type) if args.dataset != \"IMDB-MULTI\" \\\n and args.dataset != \"CSL\" else NonBinaryEvaluator(out_dim)\n\n model = get_model(args, in_dim, out_dim, device)\n\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)\n if 'ZINC' in args.dataset:\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=args.patience)\n elif 'ogb' in args.dataset:\n scheduler = None\n else:\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=args.decay_rate)\n\n if \"classification\" in task_type:\n criterion = torch.nn.BCEWithLogitsLoss() if args.dataset != \"IMDB-MULTI\" \\\n and args.dataset != \"CSL\" else torch.nn.CrossEntropyLoss()\n else:\n criterion = torch.nn.L1Loss()\n\n # If sampling, perform majority voting on the outputs of 5 independent samples\n voting_times = 5 if args.fraction != 1. 
else 1\n\n train_curve = []\n valid_curve = []\n test_curve = []\n\n for epoch in range(1, args.epochs + 1):\n\n train(model, device, train_loader, optimizer, criterion, epoch=epoch, fold_idx=fold_idx)\n\n # Only valid_perf is used for TUD\n train_perf = eval(model, device, train_loader_eval, evaluator, voting_times) \\\n if 'ogb' in args.dataset else {eval_metric: 300.}\n valid_perf = eval(model, device, valid_loader, evaluator, voting_times)\n test_perf = eval(model, device, test_loader, evaluator, voting_times) \\\n if 'ogb' in args.dataset or 'ZINC' in args.dataset else {eval_metric: 300.}\n\n if scheduler is not None:\n if 'ZINC' in args.dataset:\n scheduler.step(valid_perf[eval_metric])\n if optimizer.param_groups[0]['lr'] < 0.00001:\n break\n else:\n scheduler.step()\n\n train_curve.append(train_perf[eval_metric])\n valid_curve.append(valid_perf[eval_metric])\n test_curve.append(test_perf[eval_metric])\n\n run.log(\n {\n f'Metric/train': train_perf[eval_metric],\n f'Metric/valid': valid_perf[eval_metric],\n f'Metric/test': test_perf[eval_metric]\n }\n )\n\n wandb.join()\n\n results_queue.put((train_curve, valid_curve, test_curve))\n return\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='GNN baselines with Pytorch Geometrics')\n parser.add_argument('--device', type=int, default=0,\n help='which gpu to use if any (default: 0)')\n parser.add_argument('--gnn_type', type=str,\n help='Type of convolution {gin, originalgin, zincgin, graphconv}')\n parser.add_argument('--random_ratio', type=float, default=0.,\n help='Number of random features, > 0 only for RNI')\n parser.add_argument('--model', type=str,\n help='Type of model {deepsets, dss, gnn}')\n parser.add_argument('--drop_ratio', type=float, default=0.5,\n help='dropout ratio (default: 0.5)')\n parser.add_argument('--num_layer', type=int, default=5,\n help='number of GNN message passing layers (default: 5)')\n parser.add_argument('--channels', type=str,\n help='String with dimension of each DS layer, separated by \"-\"'\n '(considered only if args.model is deepsets)')\n parser.add_argument('--emb_dim', type=int, default=300,\n help='dimensionality of hidden units in GNNs (default: 300)')\n parser.add_argument('--jk', type=str, default=\"last\",\n help='JK strategy, either last or concat (default: last)')\n parser.add_argument('--batch_size', type=int, default=32,\n help='input batch size for training (default: 32)')\n parser.add_argument('--learning_rate', type=float, default=0.01,\n help='learning rate for training (default: 0.01)')\n parser.add_argument('--decay_rate', type=float, default=0.5,\n help='decay rate for training (default: 0.5)')\n parser.add_argument('--decay_step', type=int, default=50,\n help='decay step for training (default: 50)')\n parser.add_argument('--epochs', type=int, default=100,\n help='number of epochs to train (default: 100)')\n parser.add_argument('--num_workers', type=int, default=0,\n help='number of workers (default: 0)')\n parser.add_argument('--dataset', type=str, default=\"ogbg-molhiv\",\n help='dataset name (default: ogbg-molhiv)')\n parser.add_argument('--policy', type=str, default=\"edge_deleted\",\n help='Subgraph selection policy in {edge_deleted, node_deleted, ego_nets}'\n ' (default: edge_deleted)')\n parser.add_argument('--num_hops', type=int, default=2,\n help='Depth of the ego net if policy is ego_nets (default: 2)')\n parser.add_argument('--seed', type=int, default=0,\n help='random seed (default: 0)')\n parser.add_argument('--fraction', type=float, 
default=1.0,\n help='Fraction of subsampled subgraphs (1.0 means full bag aka no sampling)')\n parser.add_argument('--patience', type=int, default=20,\n help='patience (default: 20)')\n parser.add_argument('--test', action='store_true',\n help='quick test')\n\n parser.add_argument('--filename', type=str, default=\"\",\n help='filename to output result (default: )')\n\n args = parser.parse_args()\n\n args.channels = list(map(int, args.channels.split(\"-\")))\n device = torch.device(\"cuda:\" + str(args.device)) if torch.cuda.is_available() else torch.device(\"cpu\")\n\n # set seed\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n mp.set_start_method('spawn')\n\n sweep_run = wandb.init()\n sweep_id = sweep_run.sweep_id or \"unknown\"\n sweep_url = sweep_run.get_sweep_url()\n project_url = sweep_run.get_project_url()\n sweep_group_url = \"{}/groups/{}\".format(project_url, sweep_id)\n sweep_run.notes = sweep_group_url\n sweep_run.save()\n sweep_run_name = sweep_run.name or sweep_run.id or \"unknown\"\n\n if 'ogb' in args.dataset or 'ZINC' in args.dataset:\n n_folds = 1\n elif 'CSL' in args.dataset:\n n_folds = 5\n else:\n n_folds = 10\n\n # number of processes to run in parallel\n # TODO: make it dynamic\n if n_folds > 1 and 'REDDIT' not in args.dataset:\n if args.dataset == 'PROTEINS':\n num_proc = 2\n else:\n num_proc = 3 if args.batch_size == 128 and args.dataset != 'MUTAG' and args.dataset != 'PTC' else 5\n else:\n num_proc = 1\n\n if args.dataset in ['CEXP', 'EXP']:\n num_proc = 2\n if 'IMDB' in args.dataset and args.policy == 'edge_deleted':\n num_proc = 1\n\n num_free = num_proc\n results_queue = mp.Queue()\n\n curve_folds = []\n fold_idx = 0\n\n if args.test:\n run(args, device, fold_idx, sweep_run_name, sweep_id, results_queue)\n exit()\n\n while len(curve_folds) < n_folds:\n if num_free > 0 and fold_idx < n_folds:\n p = mp.Process(\n target=run, args=(args, device, fold_idx, sweep_run_name, sweep_id, results_queue)\n )\n fold_idx += 1\n num_free -= 1\n p.start()\n else:\n curve_folds.append(results_queue.get())\n num_free += 1\n\n train_curve_folds = np.array([l[0] for l in curve_folds])\n valid_curve_folds = np.array([l[1] for l in curve_folds])\n test_curve_folds = np.array([l[2] for l in curve_folds])\n\n # compute aggregated curves across folds\n train_curve = np.mean(train_curve_folds, 0)\n train_accs_std = np.std(train_curve_folds, 0)\n\n valid_curve = np.mean(valid_curve_folds, 0)\n valid_accs_std = np.std(valid_curve_folds, 0)\n\n test_curve = np.mean(test_curve_folds, 0)\n test_accs_std = np.std(test_curve_folds, 0)\n\n task_type = 'classification' if args.dataset != 'ZINC' else 'regression'\n if 'classification' in task_type:\n best_val_epoch = np.argmax(valid_curve)\n best_train = max(train_curve)\n else:\n best_val_epoch = len(valid_curve) - 1\n best_train = min(train_curve)\n\n sweep_run.summary[f'Metric/train_mean'] = train_curve[best_val_epoch]\n sweep_run.summary[f'Metric/valid_mean'] = valid_curve[best_val_epoch]\n sweep_run.summary[f'Metric/test_mean'] = test_curve[best_val_epoch]\n sweep_run.summary[f'Metric/train_std'] = train_accs_std[best_val_epoch]\n sweep_run.summary[f'Metric/valid_std'] = valid_accs_std[best_val_epoch]\n sweep_run.summary[f'Metric/test_std'] = test_accs_std[best_val_epoch]\n\n if not args.filename == '':\n torch.save({'Val': valid_curve[best_val_epoch], 'Val std': valid_accs_std[best_val_epoch],\n 'Test': test_curve[best_val_epoch], 'Test std': test_accs_std[best_val_epoch],\n 'Train': 
train_curve[best_val_epoch], 'Train std': train_accs_std[best_val_epoch],\n 'BestTrain': best_train}, args.filename)\n\n wandb.join()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.random.seed", "torch.cat", "torch.manual_seed", "numpy.std", "numpy.argmax", "torch.set_num_threads", "numpy.mean", "torch.cuda.is_available", "torch.nn.BCEWithLogitsLoss", "torch.device", "torch.no_grad", "numpy.array", "torch.nn.L1Loss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]