repo_name (string, lengths 6 to 130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
jmackereth/eaglepy | [
"a1d4e621d04fb9312688588a03babd3fd1028be8"
]
| [
"eaglepy/h5read.py"
]
| [
"import h5py\nimport os\nimport glob\nimport re\nimport numpy as np\nfrom . import peano\nimport warnings\nfrom scipy.integrate import quad\n\nbase_path = os.environ['EAGLE_BASE_PATH']\nrelease = os.environ['EAGLE_ACCESS_TYPE']\n\nclass Snapshot:\n \"\"\" Basic SnapShot superclass which finds the relevant files and gets relevant information\n regarding the snapshot specified.\n\n arguments:\n run - the run (e.g. L0012N0188)\n model - an EAGLE model (e.g. Ref)\n tag - a tag string specifying a snapshot output (e.g. 028_z000p000)\n\n history:\n written - Mackereth (UoB) - 22/11/2019\n \"\"\"\n def __init__(self, run, model, tag, load_particles=False):\n #store the snapshot identity info\n self.run = run\n self.model = model\n self.tag = tag\n if release == 'public':\n self.simlabel = self.model+self.run\n self.snaplabel = 'snapshot_'+self.tag\n self.base_subfile = 'snap_'+self.tag\n self.path = os.path.join(base_path, self.simlabel, self.snaplabel)\n elif release == 'ARI':\n self.snaplabel = 'snapshot_'+self.tag\n self.base_subfile = 'snap_'+self.tag\n self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)\n else:\n raise Exception('private/custom data access is not yet implemented!')\n if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):\n raise Exception('could not see snapshot data in directory: '+self.path)\n #get the files related to this snapshot and load some of their metadata\n self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))\n self.nfiles = len(self.files)\n self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())\n self.abundance_dict = dict(h5py.File(self.files[0], 'r')['/Parameters/ChemicalElements'].attrs.items())\n self.elements = ['Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 'Silicon', 'Sulphur', 'Magnesium', 'Iron']\n self.solar_abundances = dict([(self.elements[i],self.abundance_dict['SolarAbundance_%s' % self.elements[i]]) for i in range(len(self.elements))])\n self.BoxSize = self.header_dict['BoxSize']\n self.HubbleParam = self.header_dict['HubbleParam']\n self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']\n self.NumPartTotal = self.header_dict['NumPart_Total']\n self.ParticleTypes = np.array([0,1,2,3,4,5])\n self.ParticleTypePresent = self.NumPartTotal > 0\n self.ParticleTypePresent_file = np.zeros((len(self.files),len(self.NumPartTotal)), dtype=bool)\n for ii, file in enumerate(self.files):\n head = dict(h5py.File(file, 'r')['/Header'].attrs.items())\n self.ParticleTypePresent_file[ii, head['NumPart_ThisFile'] > 0] = True\n self._ptypeind = {self.ParticleTypes[self.ParticleTypePresent][i]:i for i in range(len(self.ParticleTypes[self.ParticleTypePresent]))}\n #get the Hash Table info for P-H key sorting\n self.HashBits = dict(h5py.File(self.files[0], 'r')['/HashTable'].attrs.items())['HashBits']\n self.HashGridSideLength = 2**self.HashBits\n self.HashGridCellSize = self.BoxSize/self.HashGridSideLength\n self.firstkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))\n self.lastkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))\n self.datasets = {}\n for ii,parttype in enumerate(self.ParticleTypes[self.ParticleTypePresent]):\n self.firstkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/FirstKeyInFile'])\n self.lastkeys[ii] 
= np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'])\n #be sure we get a file with this parttype (only really an issue for when low N stars!!)\n ind = np.nonzero(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'][:])[0][0]\n self.datasets['PartType'+str(parttype)] = list(h5py.File(self.files[ind], 'r')['/PartType'+str(parttype)].keys())\n if load_particles:\n self._get_coordinates()\n\n def _get_coordinates(self):\n \"\"\" Load all the coordinates of the available particles\n \"\"\"\n #load coordinates and velocities\n coordinates = []\n velocities = []\n for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):\n #now load the coordinates in these files and save the indices for each particle type\n thistypecoord, thistypevels = self._get_parttype_indices(type, self.files)\n coordinates.append(thistypecoord)\n velocities.append(thistypevels)\n self.velocities = velocities\n self.coordinates = coordinates\n\n def _get_parttype_indices(self, parttype, files):\n \"\"\"get the coordinates and indices for a given particle type in a given region\"\"\"\n coords, velocities, indices = [], [], []\n for ii,file in enumerate(files):\n #check this particle type is present here\n if not _particle_type_present(parttype, file):\n return None, None\n # load the file\n thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])\n thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])\n #store the coordinates and the indices of these particles in the file\n coords.append(thisfilecoords)\n velocities.append(thisfilevels)\n return np.concatenate(coords), np.concatenate(velocities)\n\n def _get_coords_vels(self, parttype, files):\n \"\"\"get the coordinates and velocities for all particles of a certain type\"\"\"\n if not self.ParticleTypePresent[parttype]:\n warnings.warn('Particle type is not present, returning empty arrays...')\n return np.array([]), np.array([]), np.array([])\n coords, velocities, indices = [], [], []\n for file in files:\n # load the file\n thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])\n thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])\n #store the coordinates and the indices of these particles in the file\n coords.append(thisfilecoords)\n velocities.append(thisfilevels)\n return np.concatenate(coords), np.concatenate(velocities)\n\n def get_dataset(self, parttype, dataset, physical=False, cgs=False):\n \"\"\" get the data for a given entry in the HDF5 file for the given region \"\"\"\n if not self.ParticleTypePresent[parttype]:\n warnings.warn('Particle type is not present, returning empty arrays...')\n return np.array([])\n key = os.path.join('/PartType'+str(parttype),dataset)\n if physical:\n #find conversion factor\n factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)\n elif not physical and cgs:\n factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']\n else:\n #else just multiply by 1!\n factor = 1.\n out = []\n for ii,file in enumerate(self.files):\n # load this file and get the particles\n out.append(np.array(h5py.File(file, 'r')[key]) * factor)\n return np.concatenate(out)\n\n def _conversion_factor(self, key, a, h, cgs=False):\n aexp_scale, h_scale = self._get_conversion_factor_exponents(key)\n if cgs:\n cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']\n else:\n cgs_factor = 1.\n return 
a**(aexp_scale)*h**(h_scale)*cgs_factor\n\n def _get_conversion_factor_exponents(self, key):\n aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']\n h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']\n return aexp_scale, h_scale\n\n def _single_X_H(self,X,H,element):\n solar = self.solar_abundances[element]\n solarH = self.solar_abundances['Hydrogen']\n return np.log10(X/H)-np.log10(solar/solarH)\n\n def abundance_ratios(self,gas=False,smoothed=True):\n \"\"\" Compute element abundance ratios for the region, returns a dict of [X/H] \"\"\"\n if smoothed:\n e_key = 'SmoothedElementAbundance'\n else:\n e_key = 'ElementAbundance'\n if gas:\n parttype = 0\n else:\n parttype = 4\n entries = []\n H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))\n for i in range(len(self.elements)):\n if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':\n continue\n X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))\n entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))\n return dict(entries)\n\n def t_lookback(self,a):\n return a / (np.sqrt(self.Omega0 * a + self.OmegaLambda * (a ** 4)))\n\n def z2age(self,z):\n a = 1 / (1 + z)\n t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])\n return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t\n\n def a2age(self,a):\n t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])\n return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t\n\n def z2tau(self,z):\n t_em = quad(self.t_lookback, 0., self.a0)[0]\n t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em\n a = 1 / (1 + z)\n t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])\n return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)\n\n def a2tau(self,a):\n t_em = quad(self.t_lookback, 0., self.a0)[0]\n t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em\n t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])\n return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)\n\n\n\n\n\n\nclass SnapshotRegion(Snapshot):\n \"\"\" A class inheriting from SnapShot, which defines a region inside a larger simulation snapshot.\n when initialised, this will read the files in that region, and get the indices of the particles inside the\n desired region. The necessary datasets can then be loaded by using get_dataset.\n\n arguments:\n run - the run (e.g. L0012N0188)\n model - an EAGLE model (e.g. Ref)\n tag - a tag string specifying a snapshot output (e.g. 
028_z000p000)\n center - the center of the desired region\n sidelength - the length of a side of the volume required\n\n history:\n written - Mackereth (UoB) - 22/11/2019\n \"\"\"\n def __init__(self, run, model, tag, center, sidelength, just_get_files=False):\n #we want everything from SnapShot plus some extras\n super().__init__(run, model, tag)\n self.center = center\n self.sidelength = sidelength\n self.centered = False\n self._index_region(self.center, self.sidelength, justfiles=just_get_files)\n\n def _index_region(self, center, side_length, phgrid_n=70, justfiles=False):\n \"\"\" Load a region defined by a central cordinate and a side length\n arguments:\n center - the [x,y,z] coordinate of the desired center (simulation units)\n side_length - the desired side length (in the simulation units)\n\n keyword arguments:\n phgrid_n - the number of grid points along a side length to look for PH cells (default 70)\n \"\"\"\n #work out which files contain the desired region\n grid = peano.coordinate_grid(center, side_length, self.BoxSize, n=phgrid_n)\n keys = peano.get_unique_grid_keys(grid, self.HashGridCellSize, self.BoxSize, bits=self.HashBits)\n particles_in_volume = self.ParticleTypes[self.ParticleTypePresent]\n self.files_for_region = []\n self.file_indices = []\n coordinates = []\n velocities = []\n indices = []\n for ii in self.ParticleTypes:\n if not self.ParticleTypePresent[ii]:\n continue\n Nfiles = self._get_parttype_files(ii, keys)\n if len(Nfiles) < 1:\n #particle is not present in the region - remove from here\n self.ParticleTypePresent[ii] = 0\n continue\n thisfiles = np.array(self.files)[Nfiles]\n thisindices = Nfiles\n self.files_for_region.append(thisfiles)\n self.file_indices.append(Nfiles)\n if justfiles:\n continue\n present = False\n for file in thisfiles:\n present += _particle_type_present(ii, file)\n if present:\n #now load the coordinates in these files and save the indices for each particle type\n thistypecoord, thistypevels, thistypeindices = self._get_parttype_indices(ii, thisfiles, thisindices)\n if thistypecoord is None:\n self.ParticleTypePresent[ii] = 0\n continue\n coordinates.append(thistypecoord)\n velocities.append(thistypevels)\n indices.append(thistypeindices)\n else:\n self.ParticleTypePresent[ii] = 0\n if not justfiles:\n self.velocities = velocities\n self.coordinates = coordinates\n self.indices = indices\n self.NumPart_ThisRegion = np.zeros(len(self.NumPartTotal),dtype=np.int64)\n for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):\n self.NumPart_ThisRegion[type] = len(self.coordinates[ii])\n\n\n def _get_parttype_indices(self, parttype, files, file_indices):\n \"\"\"get the coordinates and indices for a given particle type in a given region\"\"\"\n coords, velocities, indices = [], [], []\n for ii,file in enumerate(files):\n #check this particle type is present here\n if not _particle_type_present(parttype, file):\n return None, None, None\n # load the file\n thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])\n thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])\n if (np.array(self.center)+self.sidelength > self.BoxSize).any():\n thisfilecoords = thisfilecoords - (self.center - self.BoxSize/2.)\n thisfilecoords = np.mod(thisfilecoords,self.BoxSize)\n thisfilecoords -= self.BoxSize/2.\n thisfilecoords += self.center\n # mask it to the region desired\n mask = (np.fabs(thisfilecoords[:,0]-self.center[0]) < self.sidelength/2.) 
&\\\n (np.fabs(thisfilecoords[:,1]-self.center[1]) < self.sidelength/2.) &\\\n (np.fabs(thisfilecoords[:,2]-self.center[2]) < self.sidelength/2.)\n #store the coordinates and the indices of these particles in the file\n thisfileindices = np.where(mask)[0]\n coords.append(thisfilecoords[mask])\n velocities.append(thisfilevels[mask])\n indices.append(thisfileindices)\n return np.concatenate(coords), np.concatenate(velocities), indices\n\n def get_dataset(self, parttype, dataset, physical=False, cgs=False):\n \"\"\" get the data for a given entry in the HDF5 file for the given region \"\"\"\n if not self.ParticleTypePresent[parttype]:\n warnings.warn('Particle type is not present, returning empty arrays...')\n return np.array([])\n key = os.path.join('/PartType'+str(parttype),dataset)\n if physical:\n #find conversion factor\n factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)\n elif not physical and cgs:\n factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']\n else:\n #else just multiply by 1!\n factor = 1.\n out = []\n ptypeind = self._ptypeind[parttype]\n for ii,file in enumerate(self.files_for_region[ptypeind]):\n if not _particle_type_present(parttype, file):\n continue\n # load this file and get the particles\n out.append(np.array(h5py.File(file, 'r')[key])[self.indices[ptypeind][ii]] * factor)\n if len(out) < 2:\n return out[0]\n return np.concatenate(out)\n\n def _get_parttype_files(self, parttype, keys):\n \"\"\" get the files containing this region for a given particle type \"\"\"\n Nfiles = []\n ptypeind = self._ptypeind[parttype]\n for i in range(len(keys)):\n if len(np.where(self.firstkeys[ptypeind] < keys[i])[0]) < 1:\n start = 0\n else:\n start = np.where(self.firstkeys[ptypeind] < keys[i])[0][-1]\n if len(np.where(self.firstkeys[ptypeind] > keys[i])[0]) < 1:\n end = len(self.firstkeys[ptypeind])-1\n else:\n end = np.where(self.firstkeys[ptypeind] > keys[i])[0][0]\n Nfiles.extend(np.arange(start,end+1,1))\n Nfiles = np.unique(Nfiles)\n return Nfiles\n\n\n def angular_momentum(self, parttype=1, percentile=10):\n \"\"\"Compute the angular momentum of particles within some percentile of their\n radii\n \"\"\"\n ptypeind = self._ptypeind[parttype]\n pos, vel = self.coordinates[ptypeind], self.velocities[ptypeind]\n radii = np.linalg.norm(self.coordinates[ptypeind], axis=1)\n inside = radii < np.percentile(radii, percentile)\n if parttype == 1:\n mass= np.ones(len(pos))*self.header_dict['MassTable'][1]\n else:\n mass = self.get_dataset(parttype, 'Mass')\n vec = np.cross(pos[inside],vel[inside]*mass[inside][:,np.newaxis])\n tot = np.sum(vec, axis=0)\n return tot/np.linalg.norm(tot)\n\n def _transform(self,vector):\n \"\"\"Build a transformation matrix\"\"\"\n a = vector\n b = np.matrix([0,0,1])\n v = np.cross(a,b)\n s = np.linalg.norm(v)\n c = np.dot(a,b.T)\n vx = np.matrix([[0,-v[0,2],v[0,1]],[v[0,2],0,-v[0,0]],[-v[0,1],v[0,0],0]])\n transform = np.eye(3,3) + vx + (vx*vx)*(1/(1+c[0,0]))\n return transform\n\n\n def center_and_align(self, parttype=1, align_percentile=10, return_transform=False, use_transform=False, trans=None, verbose=False, centeronly=False):\n \"\"\"Center and align the particles in the region either with a supplied transformation matrix\n or by computation of the mean angular momentum of some range of particles (defined by some percentile)\n of the radii\n \"\"\"\n ptypeind = self._ptypeind[parttype]\n if not self.centered:\n for i in range(len(self.coordinates)):\n self.coordinates[i] -= np.array(self.center)\n radii = 
np.linalg.norm(self.coordinates[ptypeind], axis=1)\n inside = radii < np.percentile(radii, align_percentile)\n self.bulkvel = np.median(self.velocities[ptypeind][inside], axis=0)\n for i in range(len(self.velocities)):\n self.velocities[i] -= np.array(self.bulkvel)\n if centeronly:\n return None\n self.centered = True\n if use_transform:\n t = trans\n else:\n t = self._transform(self.angular_momentum(parttype=parttype, percentile=align_percentile))\n if verbose:\n print('Transforming Coordinates...')\n for i in range(len(self.coordinates)):\n self.coordinates[i] = np.einsum('ij,aj->ai', t, self.coordinates[i])\n self.velocities[i] = np.einsum('ij,aj->ai', t, self.velocities[i])\n if return_transform:\n return t\n\n def _single_X_H(self,X,H,element):\n solar = self.solar_abundances[element]\n solarH = self.solar_abundances['Hydrogen']\n return np.log10(X/H)-np.log10(solar/solarH)\n\n\n def abundance_ratios(self,gas=False,smoothed=True):\n \"\"\" Compute element abundance ratios for the region, returns a dict of [X/H] \"\"\"\n if smoothed:\n e_key = 'SmoothedElementAbundance'\n else:\n e_key = 'ElementAbundance'\n if gas:\n parttype = 0\n else:\n parttype = 4\n entries = []\n H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))\n for i in range(len(self.elements)):\n if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':\n continue\n X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))\n entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))\n return dict(entries)\n\nclass Subfind:\n \"\"\" Basic Subfind superclass which finds the relevant files.\n\n arguments:\n run - the run (e.g. L0012N0188)\n model - an EAGLE model (e.g. Ref)\n tag - a tag string specifying a snapshot output (e.g. 028_z000p000)\n\n history:\n written - Mackereth (UoB) - 22/11/2019\n \"\"\"\n def __init__(self, run, model, tag):\n #store the snapshot identity info\n self.run = run\n self.model = model\n self.tag = tag\n if release == 'public':\n self.simlabel = self.model+self.run\n self.snaplabel = 'groups_'+self.tag\n self.base_subfile = 'eagle_subfind_tab_'+self.tag\n self.path = os.path.join(base_path, self.simlabel, self.snaplabel)\n elif release == 'ARI':\n self.snaplabel = 'groups_'+self.tag\n self.base_subfile = 'eagle_subfind_tab_'+self.tag\n self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)\n else:\n raise Exception('private/custom data access is not yet implemented!')\n if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):\n raise Exception('could not see snapshot data in directory: '+self.path)\n #get the files related to this snapshot and load some of their metadata\n self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))\n self.nfiles = len(self.files)\n self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())\n self.BoxSize = self.header_dict['BoxSize']\n self.HubbleParam = self.header_dict['HubbleParam']\n self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']\n self.datasets = {}\n bases = ['FOF', 'Subhalo']\n for base in bases:\n self.datasets[base] = list(h5py.File(self.files[0], 'r')[base].keys())\n\n def get_dataset(self, dataset, physical=False, cgs=False):\n \"\"\" get the data for a given entry in the HDF5 files \"\"\"\n out = []\n if physical:\n #find conversion factor\n factor = 
self._conversion_factor(dataset, self.a0, self.HubbleParam, cgs=cgs)\n elif not physical and cgs:\n factor = h5py.File(self.files[0], 'r')[dataset].attrs['CGSConversionFactor']\n else:\n #else just multiply by 1!\n factor = 1\n for file in self.files:\n # load this file and get the particles\n out.append(np.array(h5py.File(file, 'r')[dataset])[:] * factor)\n return np.concatenate(out)\n\n\n def _conversion_factor(self, key, a, h, cgs=False):\n aexp_scale, h_scale = self._get_conversion_factor_exponents(key)\n if cgs:\n cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']\n else:\n cgs_factor = 1.\n return a**(aexp_scale)*h**(h_scale)*cgs_factor\n\n def _get_conversion_factor_exponents(self, key):\n aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']\n h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']\n return aexp_scale, h_scale\n\n\ndef natural_sort(l):\n \"\"\"natural sort using regex (adapted by Mark Byers on StackOverflow\n from http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html)\"\"\"\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = alphanum_key)\n\ndef _particle_type_present(type, file):\n head = dict(h5py.File(file, 'r')['/Header'].attrs.items())\n return head['NumPart_ThisFile'][type] > 0\n"
]
| [
[
"numpy.dot",
"numpy.median",
"numpy.where",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.eye",
"numpy.fabs",
"numpy.arange",
"numpy.sqrt",
"numpy.log10",
"numpy.cross",
"numpy.mod",
"numpy.array",
"numpy.matrix",
"numpy.percentile",
"numpy.einsum",
"scipy.integrate.quad",
"numpy.sum",
"numpy.unique"
]
]
|
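
For orientation, the `code` cell above defines the `Snapshot`, `SnapshotRegion`, and `Subfind` classes of `eaglepy/h5read.py`. A minimal usage sketch follows; the run/model/tag strings come from the class docstrings, while the data path, access type, and region centre are placeholder assumptions (the module reads `EAGLE_BASE_PATH` and `EAGLE_ACCESS_TYPE` from the environment at import time).

```python
# Hypothetical usage of eaglepy.h5read as listed in the row above; the data path
# and the region centre are placeholders, and both environment variables must be
# set before the module is imported because it reads them at import time.
import os
os.environ.setdefault("EAGLE_BASE_PATH", "/path/to/eagle/data")  # assumption
os.environ.setdefault("EAGLE_ACCESS_TYPE", "public")             # 'public' or 'ARI'

from eaglepy.h5read import Snapshot, SnapshotRegion

# Whole-box metadata and a full dataset read (PartType4 = star particles)
snap = Snapshot("L0012N0188", "Ref", "028_z000p000")
star_mass = snap.get_dataset(4, "Mass", physical=True)

# A cubic sub-region: coordinates and velocities are loaded on construction
region = SnapshotRegion("L0012N0188", "Ref", "028_z000p000",
                        center=[6.0, 6.0, 6.0], sidelength=1.0)
region.center_and_align(parttype=4)           # recentre and align to the angular momentum
ratios = region.abundance_ratios(gas=False)   # dict of [X/H] for the star particles
```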
cgangEE/pva-regression4 | [
"4bc79af0d5a6bd964f2f230ab81ec219c285aeda"
]
| [
"lib/datasets/imdb.py"
]
| [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nimport os.path as osp\nimport PIL\nfrom utils.cython_bbox import bbox_overlaps\nimport numpy as np\nimport scipy.sparse\nfrom fast_rcnn.config import cfg\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n self._num_classes = 0\n self._classes = []\n self._image_index = []\n self._obj_proposer = 'selective_search'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in xrange(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in xrange(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n parts = self.roidb[i]['parts'].copy()\n\n for j in range(3):\n oldx1 = parts[:, j * 4].copy()\n oldx2 = parts[:, j * 4 + 2].copy()\n oldx1[oldx1>=widths[i]] = widths[i] - 1\n oldx2[oldx2>=widths[i]] = widths[i] - 1\n parts[:, j * 4] = widths[i] - oldx2 - 1\n parts[:, j * 4 + 2] = widths[i] - oldx1 - 1\n assert (parts[:, j * 4 + 2] >= parts[:, j * 4]).all()\n\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n\n\n oldx1[oldx1>=widths[i]] = widths[i] - 1\n oldx2[oldx2>=widths[i]] = widths[i] - 1\n\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes' : boxes,\n 'gt_overlaps' : self.roidb[i]['gt_overlaps'],\n 'gt_classes' : self.roidb[i]['gt_classes'],\n 'flipped' : True,\n 'parts': parts\n }\n\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n 
\"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = { 'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [ [0**2, 1e5**2], # all\n [0**2, 32**2], # small\n [32**2, 96**2], # medium\n [96**2, 1e5**2], # large\n [96**2, 128**2], # 96-128\n [128**2, 256**2], # 128-256\n [256**2, 512**2], # 256-512\n [512**2, 1e5**2], # 512-inf\n ]\n assert areas.has_key(area), 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in xrange(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in xrange(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert(gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert(_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in xrange(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes' : boxes,\n 'gt_classes' : np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps' : overlaps,\n 'flipped' : False,\n 'seg_areas' : np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in xrange(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n"
]
| [
[
"numpy.zeros_like",
"numpy.zeros",
"numpy.where",
"numpy.arange",
"numpy.sort",
"numpy.hstack",
"numpy.vstack"
]
]
|
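
The `code` cell above is the dataset-agnostic `imdb` base class from a Fast R-CNN-style codebase: concrete datasets subclass it, supply the class list, image index, image paths, and a ground-truth roidb, and inherit flipping, recall evaluation, and roidb merging. Below is a minimal sketch of such a subclass; the `toy` dataset name, the directory layout, and the `_load_annotation` helper are hypothetical, and the sketch targets Python 2 like the base class itself.

```python
# Hypothetical subclass of the imdb base class shown above (Python 2, like the original).
# The dataset name, directory layout, and _load_annotation helper are illustrative only.
import os
import numpy as np
import scipy.sparse
from datasets.imdb import imdb


class toy(imdb):
    def __init__(self, image_set, data_path):
        imdb.__init__(self, 'toy_' + image_set)
        self._classes = ('__background__', 'object')
        self._data_path = data_path
        # one image id per line, e.g. data_path/ImageSets/train.txt
        with open(os.path.join(data_path, 'ImageSets', image_set + '.txt')) as f:
            self._image_index = [line.strip() for line in f]

    def image_path_at(self, i):
        return os.path.join(self._data_path, 'Images', self._image_index[i] + '.jpg')

    def default_roidb(self):
        return self.gt_roidb()

    def gt_roidb(self):
        # one dict per image with the keys the base class expects:
        # boxes, gt_classes, gt_overlaps, flipped, seg_areas
        roidb = []
        for index in self._image_index:
            boxes, classes = self._load_annotation(index)  # hypothetical loader
            overlaps = np.zeros((len(boxes), self.num_classes), dtype=np.float32)
            overlaps[np.arange(len(boxes)), classes] = 1.0
            seg_areas = ((boxes[:, 2] - boxes[:, 0] + 1) *
                         (boxes[:, 3] - boxes[:, 1] + 1)).astype(np.float32)
            roidb.append({'boxes': boxes,
                          'gt_classes': classes,
                          'gt_overlaps': scipy.sparse.csr_matrix(overlaps),
                          'flipped': False,
                          'seg_areas': seg_areas})
        return roidb
```

With such a subclass, base-class methods like `evaluate_recall()` and `create_roidb_from_box_list()` work unchanged; note that this particular fork's `append_flipped_images()` additionally expects a `parts` entry in each roidb record.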
iMurfyD/poliastro | [
"d4664854683018ea2420efa3f4a11e5e73d1b876"
]
| [
"tests/tests_twobody/test_orbit.py"
]
| [
"import pickle\n\nimport matplotlib\nimport numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import (\n ITRS,\n CartesianDifferential,\n CartesianRepresentation,\n SkyCoord,\n)\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.time import Time\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom poliastro.bodies import (\n Body,\n Earth,\n Jupiter,\n Mars,\n Mercury,\n Moon,\n Neptune,\n Pluto,\n Saturn,\n Sun,\n Uranus,\n Venus,\n)\nfrom poliastro.constants import J2000, J2000_TDB\nfrom poliastro.examples import iss\nfrom poliastro.frames.ecliptic import HeliocentricEclipticJ2000\nfrom poliastro.frames.enums import Planes\nfrom poliastro.frames.equatorial import (\n GCRS,\n HCRS,\n ICRS,\n JupiterICRS,\n MarsICRS,\n MercuryICRS,\n NeptuneICRS,\n SaturnICRS,\n UranusICRS,\n VenusICRS,\n)\nfrom poliastro.frames.util import get_frame\nfrom poliastro.twobody.orbit import Orbit\nfrom poliastro.warnings import (\n OrbitSamplingWarning,\n PatchedConicsWarning,\n TimeScaleWarning,\n)\n\n\[email protected]()\ndef hyperbolic():\n r = [1.197659243752796e09, -4.443716685978071e09, -1.747610548576734e09] * u.km\n v = (\n [5.540549267188614e00, -1.251544669134140e01, -4.848892572767733e00]\n * u.km\n / u.s\n )\n epoch = Time(\"2015-07-14 07:59\", scale=\"tdb\")\n return Orbit.from_vectors(Sun, r, v, epoch)\n\n\ndef test_default_time_for_new_state():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n expected_epoch = J2000\n ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)\n assert ss.epoch == expected_epoch\n\n\ndef test_state_raises_unitserror_if_elements_units_are_wrong():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n wrong_angle = 1.0 * u.AU\n with pytest.raises(u.UnitsError) as excinfo:\n Orbit.from_classical(Sun, _d, _, _a, _a, _a, wrong_angle)\n assert (\n \"UnitsError: Argument 'nu' to function 'from_classical' must be in units convertible to 'rad'.\"\n in excinfo.exconly()\n )\n\n\ndef test_state_raises_unitserror_if_rv_units_are_wrong():\n _d = [1.0, 0.0, 0.0] * u.AU\n wrong_v = [0.0, 1.0e-6, 0.0] * u.AU\n with pytest.raises(u.UnitsError) as excinfo:\n Orbit.from_vectors(Sun, _d, wrong_v)\n assert (\n \"UnitsError: Argument 'v' to function 'from_vectors' must be in units convertible to 'm / s'.\"\n in excinfo.exconly()\n )\n\n\ndef test_parabolic_elements_fail_early():\n attractor = Earth\n ecc = 1.0 * u.one\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(attractor, _d, ecc, _a, _a, _a, _a)\n assert (\n \"ValueError: For parabolic orbits use Orbit.parabolic instead\"\n in excinfo.exconly()\n )\n\n\ndef test_bad_inclination_raises_exception():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n bad_inc = 200 * u.deg\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(_body, _d, _, bad_inc, _a, _a, _a)\n assert (\n \"ValueError: Inclination must be between 0 and 180 degrees\" in excinfo.exconly()\n )\n\n\ndef test_bad_hyperbolic_raises_exception():\n bad_a = 1.0 * u.AU\n ecc = 1.5 * u.one\n _inc = 100 * u.deg # Unused inclination\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n with pytest.raises(ValueError) as excinfo:\n 
Orbit.from_classical(_body, bad_a, ecc, _inc, _a, _a, _a)\n assert \"Hyperbolic orbits have negative semimajor axis\" in excinfo.exconly()\n\n\ndef test_apply_maneuver_changes_epoch():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Sun, _d, _, _a, _a, _a, _a)\n dt = 1 * u.h\n dv = [0, 0, 0] * u.km / u.s\n orbit_new = ss.apply_maneuver([(dt, dv)])\n assert orbit_new.epoch == ss.epoch + dt\n\n\ndef test_orbit_from_ephem_with_no_epoch_is_today():\n # This is not that obvious http://stackoverflow.com/q/6407362/554319\n body = Earth\n ss = Orbit.from_body_ephem(body)\n assert (Time.now() - ss.epoch).sec < 1\n\n\ndef test_from_ephem_raises_warning_if_time_is_not_tdb_with_proper_time(recwarn):\n body = Earth\n epoch = Time(\"2017-09-29 07:31:26\", scale=\"utc\")\n expected_epoch_string = \"2017-09-29 07:32:35.182\" # epoch.tdb.value\n\n Orbit.from_body_ephem(body, epoch)\n\n w = recwarn.pop(TimeScaleWarning)\n assert expected_epoch_string in str(w.message)\n\n\[email protected](\"body\", [Moon, Pluto])\ndef test_from_ephem_raises_error_for_pluto_moon(body):\n with pytest.raises(RuntimeError) as excinfo:\n Orbit.from_body_ephem(body)\n assert \"To compute the position and velocity\" in excinfo.exconly()\n\n\ndef test_circular_has_proper_semimajor_axis():\n alt = 500 * u.km\n attractor = Earth\n expected_a = Earth.R + alt\n ss = Orbit.circular(attractor, alt)\n assert ss.a == expected_a\n\n\ndef test_geosync_has_proper_period():\n expected_period = 1436 * u.min\n\n ss = Orbit.circular(Earth, alt=42164 * u.km - Earth.R)\n\n assert_quantity_allclose(ss.period, expected_period, rtol=1e-4)\n\n\ndef test_parabolic_has_proper_eccentricity():\n attractor = Earth\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n expected_ecc = 1.0 * u.one\n ss = Orbit.parabolic(attractor, _d, _a, _a, _a, _a)\n assert_allclose(ss.ecc, expected_ecc)\n\n\ndef test_parabolic_has_zero_energy():\n attractor = Earth\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.parabolic(attractor, _d, _a, _a, _a, _a)\n assert_allclose(ss.energy.value, 0.0, atol=1e-16)\n\n\ndef test_pqw_for_circular_equatorial_orbit():\n ss = Orbit.circular(Earth, 600 * u.km)\n expected_p = [1, 0, 0] * u.one\n expected_q = [0, 1, 0] * u.one\n expected_w = [0, 0, 1] * u.one\n p, q, w = ss.pqw()\n assert_allclose(p, expected_p)\n assert_allclose(q, expected_q)\n assert_allclose(w, expected_w)\n\n\[email protected](\n \"attractor,alt,argp,expected_argp,expected_inc\",\n [\n (\n Earth,\n 1e6 * u.m,\n 3 * np.pi / 2 * u.rad,\n 3 * np.pi / 2 * u.rad,\n 63.4349 * np.pi / 180 * u.rad,\n ),\n (Mars, 3e8 * u.m, 0 * u.deg, 0 * u.deg, 63.4349 * np.pi / 180 * u.rad),\n ],\n)\ndef test_frozen_orbit_argp(attractor, alt, argp, expected_argp, expected_inc):\n orbit = Orbit.frozen(attractor, alt, argp=argp)\n assert_allclose(orbit.argp, expected_argp)\n assert_allclose(orbit.inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,inc,argp,expected_inc,expected_argp\",\n [\n (\n Earth,\n 1e6 * u.m,\n 116.5651 * np.pi / 180 * u.rad,\n 3 * np.pi / 2 * u.rad,\n 116.5651 * np.pi / 180 * u.rad,\n 3 * np.pi / 2 * u.rad,\n ),\n (\n Mars,\n 3e8 * u.m,\n 63.4349 * np.pi / 180 * u.rad,\n np.pi / 2 * u.rad,\n 63.4349 * np.pi / 180 * u.rad,\n np.pi / 2 * u.rad,\n ),\n ],\n)\ndef test_frozen_orbit_with_critical_argp_and_critical_inc(\n attractor, alt, inc, argp, expected_inc, expected_argp\n):\n orbit = Orbit.frozen(attractor, alt, 
inc=inc, argp=argp)\n assert_allclose(orbit.argp, expected_argp)\n assert_allclose(orbit.inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,expected_inc,expected_argp\",\n [\n (Earth, 1e6 * u.m, 63.4349 * np.pi / 180 * u.rad, np.pi / 2 * u.rad),\n (Mars, 3e8 * u.m, 63.4349 * np.pi / 180 * u.rad, np.pi / 2 * u.rad),\n ],\n)\ndef test_frozen_orbit_no_args(attractor, alt, expected_inc, expected_argp):\n orbit = Orbit.frozen(attractor, alt)\n argp = orbit.argp\n inc = orbit.inc\n assert_allclose(argp, expected_argp)\n assert_allclose(inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,argp,expected_inc,ecc,expected_ecc\",\n [\n (\n Earth,\n 1e6 * u.m,\n 2 * u.deg, # Non critical value\n 63.4349 * np.pi / 180 * u.rad,\n None,\n 0.0549 * u.one,\n ),\n (\n Mars,\n 3e8 * u.m,\n 0 * u.deg, # Non critical value\n 63.4349 * np.pi / 180 * u.rad,\n 0.04 * u.one,\n 0.04 * u.one,\n ),\n ],\n)\ndef test_frozen_orbit_with_non_critical_argp(\n attractor, alt, argp, expected_inc, ecc, expected_ecc\n):\n orbit = Orbit.frozen(attractor, alt, argp=argp, ecc=ecc) # Non-critical value\n assert_allclose(orbit.inc, expected_inc)\n assert_allclose(orbit.ecc, expected_ecc)\n\n\ndef test_frozen_orbit_non_critical_inclination():\n orbit = Orbit.frozen(Earth, 1e3 * u.km, inc=0 * u.deg) # Non-critical value\n assert orbit.argp in [np.pi / 2, 3 * np.pi / 2] * u.rad\n\n\ndef test_frozen_orbit_venus_special_case():\n with pytest.raises(NotImplementedError) as excinfo:\n Orbit.frozen(Venus, 1 * u.m)\n assert excinfo.type == NotImplementedError\n assert str(excinfo.value) == \"This has not been implemented for Venus\"\n\n\ndef test_frozen_orbit_non_spherical_arguments():\n with pytest.raises(AttributeError) as excinfo:\n Orbit.frozen(Jupiter, 1 * u.m)\n assert excinfo.type == AttributeError\n assert (\n str(excinfo.value)\n == \"Attractor Jupiter has not spherical harmonics implemented\"\n )\n\n\ndef test_frozen_orbit_altitude():\n with pytest.raises(ValueError) as excinfo:\n Orbit.frozen(Earth, -1 * u.m)\n assert excinfo.type == ValueError\n assert (\n str(excinfo.value)\n == \"The semimajor axis may not be smaller that Earth's radius\"\n )\n\n\ndef test_orbit_representation():\n ss = Orbit.circular(\n Earth, 600 * u.km, 20 * u.deg, epoch=Time(\"2018-09-08 09:04:00\", scale=\"tdb\")\n )\n expected_str = \"6978 x 6978 km x 20.0 deg (GCRS) orbit around Earth (\\u2641) at epoch 2018-09-08 09:04:00.000 (TDB)\"\n\n assert str(ss) == repr(ss) == expected_str\n\n\ndef test_orbit_no_frame_representation():\n date_launch = Time(\"2011-11-26 15:02\", scale=\"utc\")\n r = [61_445.76498656, 24_827.93010168, 0.0] * u.km\n v = [-0.42581645, -0.18867869, 0.0] * u.km / u.s\n ss = Orbit.from_vectors(Moon, r, v, date_launch)\n expected_str = \"106 x -142299 km x 180.0 deg orbit around Moon (\\u263E) at epoch 2011-11-26 15:02:00.000 (UTC)\"\n\n assert str(ss) == repr(ss) == expected_str\n\n\ndef test_sample_numpoints():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)\n positions = ss.sample(values=50)\n assert len(positions) == 50\n\n\[email protected](\"num_points\", [3, 5, 7, 9, 11, 101])\ndef test_sample_num_points(num_points):\n # Data from Vallado, example 2.4\n r0 = [1_131.340, -2_282.343, 6_672.423] * u.km\n v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s\n ss0 = Orbit.from_vectors(Earth, r0, v0)\n\n # TODO: Test against the perigee and apogee\n # expected_ss = 
ss0.propagate(ss0.period / 2)\n\n rr = ss0.sample(num_points)\n\n assert len(rr) == num_points\n # assert_quantity_allclose(rr[num_points // 2].data.xyz, expected_ss.r)\n\n\ndef test_sample_big_orbits():\n # See https://github.com/poliastro/poliastro/issues/265\n ss = Orbit.from_vectors(\n Sun,\n [-9_018_878.6, -94_116_055, 22_619_059] * u.km,\n [-49.950923, -12.948431, -4.2925158] * u.km / u.s,\n )\n positions = ss.sample(15)\n assert len(positions) == 15\n\n\ndef test_hyperbolic_nu_value_check(hyperbolic):\n positions = hyperbolic.sample(100)\n\n assert isinstance(positions, CartesianRepresentation)\n assert len(positions) == 100\n\n\ndef test_hyperbolic_modulus_wrapped_nu():\n ss = Orbit.from_vectors(\n Sun,\n [-9.77441841e07, 1.01000539e08, 4.37584668e07] * u.km,\n [23.75936985, -43.09599568, -8.7084724] * u.km / u.s,\n )\n num_values = 3\n\n positions = ss.sample(num_values)\n\n assert_quantity_allclose(positions[0].xyz, ss.r)\n\n\[email protected](\"min_anomaly\", [-30 * u.deg, -10 * u.deg])\[email protected](\"max_anomaly\", [10 * u.deg, 30 * u.deg])\ndef test_sample_hyperbolic_limits(hyperbolic, min_anomaly, max_anomaly):\n num_points = 50\n\n coords = hyperbolic.sample(\n num_points, min_anomaly=min_anomaly, max_anomaly=max_anomaly\n )\n\n assert len(coords) == num_points\n\n\ndef test_sample_hyperbolic_outside_limits(hyperbolic):\n with pytest.warns(OrbitSamplingWarning, match=\"anomaly outside range, clipping\"):\n hyperbolic.sample(3, min_anomaly=-np.pi * u.rad)\n\n with pytest.warns(OrbitSamplingWarning, match=\"anomaly outside range, clipping\"):\n hyperbolic.sample(3, max_anomaly=np.pi * u.rad)\n\n\ndef test_orbit_is_pickable(hyperbolic):\n pickled = pickle.dumps(hyperbolic)\n ss_result = pickle.loads(pickled)\n\n assert_array_equal(hyperbolic.r, ss_result.r)\n assert_array_equal(hyperbolic.v, ss_result.v)\n assert ss_result.epoch == hyperbolic.epoch\n\n\ndef test_orbit_plot_is_static():\n # Data from Curtis, example 4.3\n r = [-6_045, -3_490, 2_500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n\n plot = ss.plot()\n\n assert isinstance(plot[0], matplotlib.lines.Line2D)\n assert isinstance(plot[1], matplotlib.lines.Line2D)\n\n\ndef test_orbit_plot_static_3d():\n # Data from Curtis, example 4.3\n r = [-6_045, -3_490, 2_500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n with pytest.raises(\n ValueError,\n match=\"The static plotter does not support 3D, use `interactive=True`\",\n ):\n ss.plot(use_3d=True)\n\n\[email protected](\"use_3d\", [False, True])\ndef test_orbit_plot_is_not_static(use_3d):\n from plotly.graph_objects import Figure\n\n # Data from Curtis, example 4.3\n r = [-6_045, -3_490, 2_500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n\n plot = ss.plot(interactive=True, use_3d=use_3d)\n\n assert isinstance(plot, Figure)\n\n\[email protected](\n \"attractor, expected_frame_class\",\n [\n (Sun, HCRS),\n (Mercury, MercuryICRS),\n (Venus, VenusICRS),\n (Earth, GCRS),\n (Mars, MarsICRS),\n (Jupiter, JupiterICRS),\n (Saturn, SaturnICRS),\n (Uranus, UranusICRS),\n (Neptune, NeptuneICRS),\n ],\n)\ndef test_orbit_get_frame_returns_proper_frame(attractor, expected_frame_class):\n # Dummy data\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n epoch = Time(\"2015-07-14 07:59\", scale=\"tdb\")\n\n ss = Orbit.from_vectors(attractor, r, v, epoch)\n frame = ss.get_frame()\n\n assert 
frame.is_equivalent_frame(expected_frame_class(obstime=epoch))\n assert frame.obstime == epoch\n\n\ndef test_orbit_from_custom_body_raises_error_when_asked_frame():\n attractor = Body(Sun, 1 * u.km ** 3 / u.s ** 2, \"_DummyPlanet\")\n\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(attractor, r, v)\n\n with pytest.raises(NotImplementedError) as excinfo:\n ss.get_frame()\n assert (\n \"Frames for orbits around custom bodies are not yet supported\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"body\", [Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune]\n)\ndef test_orbit_from_ephem_is_in_icrs_frame(body):\n ss = Orbit.from_body_ephem(body)\n\n assert ss.get_frame().is_equivalent_frame(ICRS())\n\n\ndef test_orbit_accepts_ecliptic_plane():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v, plane=Planes.EARTH_ECLIPTIC)\n\n assert ss.get_frame().is_equivalent_frame(HeliocentricEclipticJ2000(obstime=J2000))\n\n\ndef test_orbit_represent_as_produces_correct_data():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v)\n\n expected_result = CartesianRepresentation(\n *r, differentials=CartesianDifferential(*v)\n )\n\n result = ss.represent_as(CartesianRepresentation, CartesianDifferential)\n\n # We can't directly compare the objects, see\n # https://github.com/astropy/astropy/issues/7793\n assert (result.xyz == expected_result.xyz).all()\n assert (\n result.differentials[\"s\"].d_xyz == expected_result.differentials[\"s\"].d_xyz\n ).all()\n\n\ndef test_orbit_propagate_retains_plane():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v, plane=Planes.EARTH_ECLIPTIC)\n\n orig_frame = ss.get_frame()\n\n final_ss = ss.propagate(1 * u.h)\n expected_frame = orig_frame.replicate_without_data(obstime=final_ss.epoch)\n\n assert final_ss.get_frame().is_equivalent_frame(expected_frame)\n\n\[email protected]_data\ndef test_from_horizons_raise_valueerror():\n with pytest.raises(ValueError) as exep:\n Orbit.from_horizons(name=\"Dummy\", attractor=Sun)\n assert (\n \"ValueError: Unknown target (Dummy). 
Maybe try different id_type?\"\n in exep.exconly()\n )\n\n\[email protected]_data\ndef test_orbit_from_horizons_has_expected_elements():\n epoch = Time(\"2018-07-23\", scale=\"tdb\")\n # Orbit Parameters of Ceres\n # Taken from https://ssd.jpl.nasa.gov/horizons.cgi\n ss = Orbit.from_classical(\n Sun,\n 2.76710759221651 * u.au,\n 0.07554803091400027 * u.one,\n 27.18502494739172 * u.deg,\n 23.36913218336299 * u.deg,\n 132.2919809219236 * u.deg,\n 21.28957916690369 * u.deg,\n epoch,\n )\n ss1 = Orbit.from_horizons(name=\"Ceres\", attractor=Sun, epoch=epoch)\n assert ss.pqw()[0].value.all() == ss1.pqw()[0].value.all()\n assert ss.r_a == ss1.r_a\n assert ss.a == ss1.a\n\n\[email protected]_data\ndef test_plane_is_set_in_horizons():\n plane = Planes.EARTH_ECLIPTIC\n ss = Orbit.from_horizons(name=\"Ceres\", attractor=Sun, plane=plane)\n assert ss.plane == plane\n\n\[email protected](\n \"attractor,angular_velocity,expected_a,expected_period\",\n [\n (\n Earth,\n (2 * np.pi / 23.9345) * u.rad / u.hour,\n 42_164_205 * u.m,\n 23.9345 * u.hour,\n ),\n (\n Mars,\n (2 * np.pi / 24.6228) * u.rad / u.hour,\n 20_427_595 * u.m,\n 24.6228 * u.hour,\n ),\n ],\n)\ndef test_geostationary_creation_from_angular_velocity(\n attractor, angular_velocity, expected_a, expected_period\n):\n ss = Orbit.geostationary(attractor=attractor, angular_velocity=angular_velocity)\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, expected_period, rtol=1.0e-7)\n\n\[email protected](\n \"attractor,period,expected_a\",\n [\n (Earth, 23.9345 * u.hour, 42_164_205 * u.m),\n (Mars, 24.6228 * u.hour, 20_427_595 * u.m),\n ],\n)\ndef test_geostationary_creation_from_period(attractor, period, expected_a):\n ss = Orbit.geostationary(attractor=attractor, period=period)\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, period, rtol=1.0e-7)\n\n\[email protected](\n \"attractor,period,hill_radius,expected_a\",\n [\n (Earth, 23.9345 * u.hour, 0.01 * u.AU, 42_164_205 * u.m),\n (Mars, 24.6228 * u.hour, 1_000_000 * u.km, 20_427_595 * u.m),\n ],\n)\ndef test_geostationary_creation_with_Hill_radius(\n attractor, period, hill_radius, expected_a\n):\n ss = Orbit.geostationary(\n attractor=attractor, period=period, hill_radius=hill_radius\n )\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, period, rtol=1.0e-7)\n\n\[email protected](\"attractor\", [Earth, Mars])\ndef test_geostationary_input(attractor):\n with pytest.raises(ValueError) as excinfo:\n Orbit.geostationary(attractor=attractor)\n\n assert (\n \"ValueError: At least one among angular_velocity or period must be passed\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"attractor,period,hill_radius\", [(Venus, 243.025 * u.day, 1_000_000 * u.km)]\n)\ndef test_geostationary_non_existence_condition(attractor, period, hill_radius):\n with pytest.raises(ValueError) as excinfo:\n Orbit.geostationary(attractor=attractor, period=period, hill_radius=hill_radius)\n\n assert (\n \"Geostationary orbit for the given parameters doesn't exist\"\n in excinfo.exconly()\n )\n\n\ndef test_heliosynchronous_orbit_enough_arguments():\n with pytest.raises(ValueError) as excinfo:\n Orbit.heliosynchronous(Earth, a=None, ecc=None, inc=None)\n\n assert (\n \"At least two parameters of the set {a, ecc, inc} are required.\"\n in excinfo.exconly()\n )\n\n\ndef test_heliosynchronous_orbit_without_earth():\n with pytest.raises(NotImplementedError) as excinfo:\n 
Orbit.heliosynchronous(Mars, a=800 * u.km + Mars.R, ecc=0 * u.one)\n assert \"Attractors other than Earth not supported yet\" in excinfo.exconly()\n\n\ndef test_heliosynchronous_orbit_inc():\n # Vallado, example 11-2a\n expected_ecc = 0 * u.one\n expected_a = 800 * u.km + Earth.R\n expected_inc = 98.6 * u.deg\n ss0 = Orbit.heliosynchronous(Earth, a=expected_a, ecc=expected_ecc)\n\n assert_quantity_allclose(ss0.inc, expected_inc, rtol=1e-4)\n assert_quantity_allclose(ss0.a, expected_a)\n assert_quantity_allclose(ss0.ecc, expected_ecc)\n\n\ndef test_heliosynchronous_orbit_a():\n # Vallado, example 11-2b\n expected_ecc = 0.2 * u.one\n expected_inc = 98.6 * u.deg\n expected_a = 7346.846 * u.km\n ss0 = Orbit.heliosynchronous(Earth, ecc=expected_ecc, inc=expected_inc)\n\n assert_quantity_allclose(ss0.inc, expected_inc, rtol=1e-4)\n assert_quantity_allclose(ss0.a, expected_a, rtol=1e-5)\n assert_quantity_allclose(ss0.ecc, expected_ecc)\n\n\ndef test_perigee_and_apogee():\n expected_r_a = 500 * u.km\n expected_r_p = 300 * u.km\n a = (expected_r_a + expected_r_p) / 2\n ecc = expected_r_a / a - 1\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Earth, a, ecc, _a, _a, _a, _a)\n assert_allclose(ss.r_a.to(u.km).value, expected_r_a.to(u.km).value)\n assert_allclose(ss.r_p.to(u.km).value, expected_r_p.to(u.km).value)\n\n\ndef test_expected_mean_anomaly():\n # Example from Curtis\n expected_mean_anomaly = 77.93 * u.deg\n\n attractor = Earth\n\n _a = 1.0 * u.deg # Unused angle\n a = 15_300 * u.km\n ecc = 0.37255 * u.one\n nu = 120 * u.deg\n\n orbit = Orbit.from_classical(attractor, a, ecc, _a, _a, _a, nu)\n orbit_M = orbit.M\n\n assert_quantity_allclose(orbit_M.value, expected_mean_anomaly.value, rtol=1e-2)\n\n\ndef test_expected_angular_momentum():\n # Example from Curtis\n expected_ang_mag = 72472 * u.km ** 2\n\n attractor = Earth\n\n _a = 1.0 * u.deg # Unused angle\n a = 15_300 * u.km\n ecc = 0.37255 * u.one\n nu = 120 * u.deg\n\n orbit = Orbit.from_classical(attractor, a, ecc, _a, _a, _a, nu)\n orbit_h_mag = orbit.h_mag\n\n assert_quantity_allclose(orbit_h_mag.value, expected_ang_mag.value, rtol=1e-2)\n\n\ndef test_expected_last_perifocal_passage():\n # Example from Curtis\n expected_t_p = 4077 * u.s\n\n attractor = Earth\n\n _a = 1.0 * u.deg # Unused angle\n a = 15_300 * u.km\n ecc = 0.37255 * u.one\n nu = 120 * u.deg\n\n orbit = Orbit.from_classical(attractor, a, ecc, _a, _a, _a, nu)\n orbit_t_p = orbit.t_p\n\n assert_quantity_allclose(orbit_t_p.value, expected_t_p.value, rtol=1e-2)\n\n\ndef test_convert_from_rv_to_coe():\n # Data from Vallado, example 2.6\n attractor = Earth\n p = 11_067.790 * u.km\n ecc = 0.83285 * u.one\n inc = 87.87 * u.deg\n raan = 227.89 * u.deg\n argp = 53.38 * u.deg\n nu = 92.335 * u.deg\n expected_r = [6_525.344, 6_861.535, 6_449.125] * u.km\n expected_v = [4.902276, 5.533124, -1.975709] * u.km / u.s\n\n r, v = Orbit.from_classical(\n attractor, p / (1 - ecc ** 2), ecc, inc, raan, argp, nu\n ).rv()\n\n assert_quantity_allclose(r, expected_r, rtol=1e-5)\n assert_quantity_allclose(v, expected_v, rtol=1e-5)\n\n\ndef test_convert_from_coe_to_rv():\n # Data from Vallado, example 2.5\n attractor = Earth\n r = [6_524.384, 6_862.875, 6_448.296] * u.km\n v = [4.901327, 5.533756, -1.976341] * u.km / u.s\n\n expected_p = 11_067.79 * u.km\n expected_ecc = 0.832853 * u.one\n expected_inc = 87.870 * u.deg\n expected_raan = 227.89 * u.deg\n expected_argp = 53.38 * u.deg\n expected_nu = 92.335 * u.deg\n\n ss = Orbit.from_vectors(attractor, r, v)\n\n _, ecc, inc, raan, argp, nu 
= ss.classical()\n p = ss.p\n\n assert_quantity_allclose(p, expected_p, rtol=1e-4)\n assert_quantity_allclose(ecc, expected_ecc, rtol=1e-4)\n assert_quantity_allclose(inc, expected_inc, rtol=1e-4)\n assert_quantity_allclose(raan, expected_raan, rtol=1e-4)\n assert_quantity_allclose(argp, expected_argp, rtol=1e-4)\n assert_quantity_allclose(nu, expected_nu, rtol=1e-4)\n\n\ndef test_perifocal_points_to_perigee():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Sun, _d, _, _a, _a, _a, _a)\n p, _, _ = ss.pqw()\n assert_allclose(p, ss.e_vec / ss.ecc)\n\n\ndef test_arglat_within_range():\n r = [3_539.08827417, 5_310.19903462, 3_066.31301457] * u.km\n v = [-6.49780849, 3.24910291, 1.87521413] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n assert 0 * u.deg <= ss.arglat <= 360 * u.deg\n\n\ndef test_pqw_returns_dimensionless():\n r_0 = ([1, 0, 0] * u.au).to(u.km) # type: ignore\n v_0 = ([0, 6, 0] * u.au / u.year).to(u.km / u.day)\n ss = Orbit.from_vectors(Sun, r_0, v_0)\n\n p, q, w = ss.pqw()\n\n assert p.unit == u.one\n assert q.unit == u.one\n assert w.unit == u.one\n\n\ndef test_from_coord_fails_if_no_time_differential():\n pos = [30_000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos)\n\n # Method fails if coordinate instance doesn't contain a differential with\n # respect to time\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_coords(Earth, SkyCoord(cartrep))\n assert (\n \"ValueError: Coordinate instance doesn't have a differential with respect to time\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"attractor\", [Earth, Jupiter, Mars, Mercury, Neptune, Saturn, Sun, Uranus, Venus]\n)\ndef test_orbit_creation_using_skycoord(attractor):\n vel = [0, 2, 0] * u.km / u.s\n cartdiff = CartesianDifferential(*vel)\n\n pos = [30_000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos, differentials=cartdiff)\n\n coord = SkyCoord(cartrep, frame=\"icrs\")\n o = Orbit.from_coords(attractor, coord)\n\n inertial_frame_at_body_centre = get_frame(\n attractor, Planes.EARTH_EQUATOR, obstime=coord.obstime\n )\n\n coord_transformed_to_irf = coord.transform_to(inertial_frame_at_body_centre)\n pos_transformed_to_irf = coord_transformed_to_irf.cartesian.xyz\n vel_transformed_to_irf = coord_transformed_to_irf.cartesian.differentials[\"s\"].d_xyz\n\n assert (o.r == pos_transformed_to_irf).all()\n assert (o.v == vel_transformed_to_irf).all()\n\n\[email protected]_data\[email protected](\n \"attractor\", [Earth, Jupiter, Mars, Mercury, Neptune, Saturn, Sun, Uranus, Venus]\n)\[email protected](\"frame\", [ITRS, GCRS])\[email protected](\"obstime\", [J2000, J2000_TDB])\ndef test_orbit_creation_using_frame_obj(attractor, frame, obstime):\n vel = [0, 2, 0] * u.km / u.s\n cartdiff = CartesianDifferential(*vel)\n\n pos = [30_000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos, differentials=cartdiff)\n\n coord = frame(cartrep, obstime=obstime)\n o = Orbit.from_coords(attractor, coord)\n\n inertial_frame_at_body_centre = get_frame(\n attractor, Planes.EARTH_EQUATOR, obstime=coord.obstime\n )\n\n coord_transformed_to_irf = coord.transform_to(inertial_frame_at_body_centre)\n\n pos_transformed_to_irf = coord_transformed_to_irf.cartesian.xyz\n vel_transformed_to_irf = coord_transformed_to_irf.cartesian.differentials[\"s\"].d_xyz\n\n assert_quantity_allclose(o.r, pos_transformed_to_irf, atol=1e-5 * u.km)\n assert_quantity_allclose(o.v, vel_transformed_to_irf, atol=1e-5 * u.km / u.s)\n\n\[email 
protected](\"obstime\", [J2000, J2000_TDB])\ndef test_from_coord_fails_for_multiple_positions(obstime):\n cartdiff = CartesianDifferential(\n [[0, 1, 0], [-0.1, 0.9, 0]] * u.km / u.s, xyz_axis=1\n )\n cartrep = CartesianRepresentation(\n [[1, 0, 0], [0.9, 0.1, 0]] * u.km, differentials=cartdiff, xyz_axis=1\n )\n coords = GCRS(cartrep, representation_type=CartesianRepresentation, obstime=obstime)\n\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_coords(Earth, coords)\n assert (\n \"ValueError: Coordinate instance must represents exactly 1 position, found: 2\"\n in excinfo.exconly()\n )\n\n\ndef test_from_coord_if_coord_is_not_of_shape_zero():\n pos = [0, 1, 0]\n vel = [1, 0, 0]\n cartdiff = CartesianDifferential([vel] * u.km / u.s, xyz_axis=1)\n cartrep = CartesianRepresentation([pos] * u.km, differentials=cartdiff, xyz_axis=1)\n coords = GCRS(cartrep, representation_type=CartesianRepresentation, obstime=J2000)\n\n ss = Orbit.from_coords(Earth, coords)\n\n assert_quantity_allclose(ss.r, pos * u.km, rtol=1e-5)\n assert_quantity_allclose(ss.v, vel * u.km / u.s, rtol=1e-5)\n\n\[email protected]_data\[email protected](\n \"target_name\", [\"Ceres\", \"Vesta\", \"Eros\"]\n) # Objects in both JPL SBDB and JPL Horizons\ndef test_from_sbdb_and_from_horizons_give_similar_results(target_name):\n ss_target = Orbit.from_sbdb(target_name)\n ss_classical = ss_target.classical()\n\n ss_ref = Orbit.from_horizons(\n name=target_name, attractor=Sun, plane=Planes.EARTH_ECLIPTIC\n )\n\n ss_ref = ss_ref.propagate_to_anomaly(\n ss_classical[5]\n ) # Catch reference orbit to same epoch\n ss_ref_class = ss_ref.classical()\n\n for test_elm, ref_elm in zip(ss_classical, ss_ref_class):\n assert_quantity_allclose(\n test_elm, ref_elm, rtol=1e-2\n ) # Maximum error of 1% (chosen arbitrarily)\n\n\[email protected]_data\ndef test_from_sbdb_raise_valueerror():\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_sbdb(name=\"Halley\")\n\n assert (\n str(excinfo.value)\n == \"2 different objects found: \\n2688 Halley (1982 HG1)\\n1P/Halley\\n\"\n )\n\n\ndef test_from_vectors_wrong_dimensions_fails():\n bad_r = [[1000, 0, 0]] * u.km\n bad_v = [[[0, 10, 0]]] * u.km / u.s\n\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_vectors(Earth, bad_r, bad_v)\n assert \"ValueError: Vectors must have dimension 1, got 2 and 3\" in excinfo.exconly()\n\n\ndef test_from_classical_wrong_dimensions_fails():\n bad_a = [1.0] * u.AU\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(Earth, bad_a, _, _a, _a, _a, _a)\n assert \"ValueError: Elements must be scalar, got [1.] 
AU\" in excinfo.exconly()\n\n\[email protected]_data\ndef test_orbit_change_attractor():\n Io = 501 # Id for Io moon\n ss_io = Orbit.from_horizons(Io, Sun, epoch=J2000, id_type=\"majorbody\")\n ss_io = ss_io.change_attractor(Jupiter)\n assert Jupiter == ss_io.attractor\n\n\ndef test_orbit_change_attractor_returns_self():\n assert iss.change_attractor(iss.attractor) is iss\n\n\ndef test_orbit_change_attractor_out_of_SOI():\n ss = Orbit.from_vectors(\n Sun,\n r=[5.98967334e08, 4.09500684e08, 1.60955500e08] * u.km,\n v=[-13.30694373, 25.15256978, 11.59846936] * u.km / u.s,\n epoch=J2000,\n )\n\n with pytest.raises(ValueError) as excinfo:\n ss.change_attractor(Earth)\n assert \"ValueError: Orbit is out of new attractor's SOI\" in excinfo.exconly()\n\n\ndef test_orbit_change_attractor_force():\n ss = Orbit.from_vectors(\n Sun,\n r=[5.98967334e08, 4.09500684e08, 1.60955500e08] * u.km,\n v=[-13.30694373, 25.15256978, 11.59846936] * u.km / u.s,\n epoch=J2000,\n )\n\n ss_new_attractor = ss.change_attractor(Earth, force=True)\n assert ss_new_attractor.attractor == Earth\n\n\ndef test_orbit_change_attractor_unrelated_body():\n with pytest.raises(ValueError) as excinfo:\n iss.change_attractor(Mars)\n assert \"ValueError: Cannot change to unrelated attractor\" in excinfo.exconly()\n\n\ndef test_orbit_change_attractor_closed():\n with pytest.raises(ValueError) as excinfo:\n iss.change_attractor(Sun)\n assert (\n \"ValueError: Orbit will never leave the SOI of its current attractor\"\n in excinfo.exconly()\n )\n\n\ndef test_orbit_change_attractor_open():\n r = [-6_045, -3_490, 2_500] * u.km\n v = [-15.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n\n with pytest.warns(PatchedConicsWarning) as record:\n ss.change_attractor(Sun)\n\n w = record.pop(PatchedConicsWarning)\n assert \"Leaving the SOI of the current attractor\" in w.message.args[0]\n\n\[email protected](\n \"expected_plane\", [Planes.EARTH_ECLIPTIC, Planes.EARTH_EQUATOR]\n)\ndef test_change_plane_sets_correct_plane(expected_plane):\n new_ss = iss.change_plane(expected_plane)\n\n assert new_ss.plane is expected_plane\n\n\ndef test_change_plane_same_returns_self():\n same_iss = iss.change_plane(iss.plane)\n\n assert same_iss is iss\n\n\ndef test_change_plane_twice_restores_original_data():\n new_ss = iss.change_plane(Planes.EARTH_ECLIPTIC).change_plane(iss.plane)\n\n assert_quantity_allclose(new_ss.r, iss.r)\n assert_quantity_allclose(new_ss.v, iss.v)\n\n\ndef test_time_to_anomaly():\n expected_tof = iss.period / 2\n iss_180 = iss.propagate_to_anomaly(180 * u.deg)\n tof = iss_180.time_to_anomaly(0 * u.deg)\n\n assert_quantity_allclose(tof, expected_tof)\n\n\[email protected]\ndef test_issue_798():\n epoch = Time(\"2019-11-10 12:00:00\")\n iss = Orbit.from_horizons(\n \"International Space Station\", Sun, epoch=epoch, id_type=\"majorbody\"\n )\n iss = iss.change_attractor(Earth)\n assert iss.attractor == Earth\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.testing.assert_array_equal"
]
]
|
yongsukki/keraspp | [
"03fd37cad14dba3f270b226621415938ae77f4ec"
]
| [
"tf2/ex2_1_ann_mnist_cl.py"
]
| [
"##############################################\n# Modeling\n##############################################\nfrom tensorflow.keras import layers, models\n\n\ndef ANN_models_func(Nin, Nh, Nout):\n x = layers.Input(shape=(Nin,))\n h = layers.Activation('relu')(layers.Dense(Nh)(x))\n y = layers.Activation('softmax')(layers.Dense(Nout)(h))\n model = models.Model(x, y)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef ANN_seq_func(Nin, Nh, Nout):\n model = models.Sequential()\n model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))\n model.add(layers.Dense(Nout, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n return model\n\n\nclass ANN_models_class(models.Model):\n def __init__(self, Nin, Nh, Nout):\n # Prepare network layers and activate functions\n hidden = layers.Dense(Nh)\n output = layers.Dense(Nout)\n relu = layers.Activation('relu')\n softmax = layers.Activation('softmax')\n\n # Connect network elements\n x = layers.Input(shape=(Nin,))\n h = relu(hidden(x))\n y = softmax(output(h))\n\n super().__init__(x, y)\n self.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n\nclass ANN_seq_class(models.Sequential):\n def __init__(self, Nin, Nh, Nout):\n super().__init__()\n self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))\n self.add(layers.Dense(Nout, activation='softmax'))\n self.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n\n##############################################\n# Data\n##############################################\nimport numpy as np\nfrom tensorflow.keras import datasets # mnist\nfrom tensorflow.keras import utils # to_categorical\n\n\ndef Data_func():\n (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()\n\n Y_train = utils.to_categorical(y_train)\n Y_test = utils.to_categorical(y_test)\n\n L, W, H = X_train.shape\n X_train = X_train.reshape(-1, W * H)\n X_test = X_test.reshape(-1, W * H)\n\n X_train = X_train / 255.0\n X_test = X_test / 255.0\n\n return (X_train, Y_train), (X_test, Y_test)\n\n\n##############################################\n# Plotting\n##############################################\nimport matplotlib.pyplot as plt\n\n\ndef plot_acc(history, title=None):\n # summarize history for accuracy\n if not isinstance(history, dict):\n history = history.history\n\n plt.plot(history['acc'])\n plt.plot(history['val_acc'])\n if title is not None:\n plt.title(title)\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Verification'], loc=0)\n # plt.show()\n\n\ndef plot_loss(history, title=None):\n # summarize history for loss\n if not isinstance(history, dict):\n history = history.history\n\n plt.plot(history['loss'])\n plt.plot(history['val_loss'])\n if title is not None:\n plt.title(title)\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Verification'], loc=0)\n # plt.show()\n\n\n##############################################\n# Main\n##############################################\ndef main():\n Nin = 784\n Nh = 100\n number_of_class = 10\n Nout = number_of_class\n\n # model = ANN_models_func(Nin, Nh, Nout)\n # model = ANN_models_class(Nin, Nh, Nout)\n model = ANN_seq_class(Nin, Nh, Nout)\n (X_train, Y_train), (X_test, Y_test) = Data_func()\n\n ##############################################\n # Training\n ##############################################\n history = model.fit(X_train, 
Y_train, epochs=15, batch_size=100, validation_split=0.2)\n performance_test = model.evaluate(X_test, Y_test, batch_size=100, verbose=0)\n print('Test Loss and Accuracy ->', performance_test)\n\n plot_loss(history)\n plt.show()\n plot_acc(history)\n plt.show()\n\n\n# Run code\nif __name__ == '__main__':\n main()\n"
]
| [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.layers.Input",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.layers.Activation",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
Neronjust2017/TransferBed | [
"eaa703a4bc10eaf6216fe1394cd272f6e75489e2"
]
| [
"dalib/adaptation/dann.py"
]
| [
"from typing import Optional\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..modules.grl import WarmStartGradientReverseLayer\nfrom common.modules.classifier import Classifier as ClassifierBase\nfrom common.utils.metric import binary_accuracy\n\n__all__ = ['DomainAdversarialLoss']\n\n\nclass DomainAdversarialLoss(nn.Module):\n \"\"\"\n The Domain Adversarial Loss proposed in\n `Domain-Adversarial Training of Neural Networks (ICML 2015) <https://arxiv.org/abs/1505.07818>`_\n\n Domain adversarial loss measures the domain discrepancy through training a domain discriminator.\n Given domain discriminator :math:`D`, feature representation :math:`f`, the definition of DANN loss is\n\n .. math::\n loss(\\mathcal{D}_s, \\mathcal{D}_t) = \\mathbb{E}_{x_i^s \\sim \\mathcal{D}_s} log[D(f_i^s)]\n + \\mathbb{E}_{x_j^t \\sim \\mathcal{D}_t} log[1-D(f_j^t)].\n\n Args:\n domain_discriminator (torch.nn.Module): A domain discriminator object, which predicts the domains of features. Its input shape is (N, F) and output shape is (N, 1)\n reduction (str, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``\n grl (WarmStartGradientReverseLayer, optional): Default: None.\n\n Inputs:\n - f_s (tensor): feature representations on source domain, :math:`f^s`\n - f_t (tensor): feature representations on target domain, :math:`f^t`\n - w_s (tensor, optional): a rescaling weight given to each instance from source domain.\n - w_t (tensor, optional): a rescaling weight given to each instance from target domain.\n\n Shape:\n - f_s, f_t: :math:`(N, F)` where F means the dimension of input features.\n - Outputs: scalar by default. 
If :attr:`reduction` is ``'none'``, then :math:`(N, )`.\n\n Examples::\n\n >>> from dalib.modules.domain_discriminator import DomainDiscriminator\n >>> discriminator = DomainDiscriminator(in_feature=1024, hidden_size=1024)\n >>> loss = DomainAdversarialLoss(discriminator, reduction='mean')\n >>> # features from source domain and target domain\n >>> f_s, f_t = torch.randn(20, 1024), torch.randn(20, 1024)\n >>> # If you want to assign different weights to each instance, you should pass in w_s and w_t\n >>> w_s, w_t = torch.randn(20), torch.randn(20)\n >>> output = loss(f_s, f_t, w_s, w_t)\n \"\"\"\n\n def __init__(self, domain_discriminator: nn.Module, reduction: Optional[str] = 'mean',\n grl: Optional = None):\n super(DomainAdversarialLoss, self).__init__()\n self.grl = WarmStartGradientReverseLayer(alpha=1., lo=0., hi=1., max_iters=1000, auto_step=True) if grl is None else grl\n self.domain_discriminator = domain_discriminator\n self.bce = lambda input, target, weight: \\\n F.binary_cross_entropy(input, target, weight=weight, reduction=reduction)\n self.domain_discriminator_accuracy = None\n\n def forward(self, f_s: torch.Tensor, f_t: torch.Tensor,\n w_s: Optional[torch.Tensor] = None, w_t: Optional[torch.Tensor] = None) -> torch.Tensor:\n f = self.grl(torch.cat((f_s, f_t), dim=0))\n d = self.domain_discriminator(f)\n d_s, d_t = d.chunk(2, dim=0)\n d_label_s = torch.ones((f_s.size(0), 1)).to(f_s.device)\n d_label_t = torch.zeros((f_t.size(0), 1)).to(f_t.device)\n self.domain_discriminator_accuracy = 0.5 * (binary_accuracy(d_s, d_label_s) + binary_accuracy(d_t, d_label_t))\n\n if w_s is None:\n w_s = torch.ones_like(d_label_s)\n if w_t is None:\n w_t = torch.ones_like(d_label_t)\n return 0.5 * (self.bce(d_s, d_label_s, w_s.view_as(d_s)) + self.bce(d_t, d_label_t, w_t.view_as(d_t)))\n\n# class MultiSourceDomainAdversarialLoss(nn.Module):\n# \"\"\"\n# The Domain Adversarial Loss proposed in\n# `Domain-Adversarial Training of Neural Networks (ICML 2015) <https://arxiv.org/abs/1505.07818>`_\n#\n# Domain adversarial loss measures the domain discrepancy through training a domain discriminator.\n# Given domain discriminator :math:`D`, feature representation :math:`f`, the definition of DANN loss is\n#\n# .. math::\n# loss(\\mathcal{D}_s, \\mathcal{D}_t) = \\mathbb{E}_{x_i^s \\sim \\mathcal{D}_s} log[D(f_i^s)]\n# + \\mathbb{E}_{x_j^t \\sim \\mathcal{D}_t} log[1-D(f_j^t)].\n#\n# Args:\n# domain_discriminator (torch.nn.Module): A domain discriminator object, which predicts the domains of features. Its input shape is (N, F) and output shape is (N, 1)\n# reduction (str, optional): Specifies the reduction to apply to the output:\n# ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n# ``'mean'``: the sum of the output will be divided by the number of\n# elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``\n# grl (WarmStartGradientReverseLayer, optional): Default: None.\n#\n# Inputs:\n# - f_s (tensor): feature representations on source domain, :math:`f^s`\n# - f_t (tensor): feature representations on target domain, :math:`f^t`\n# - w_s (tensor, optional): a rescaling weight given to each instance from source domain.\n# - w_t (tensor, optional): a rescaling weight given to each instance from target domain.\n#\n# Shape:\n# - f_s, f_t: :math:`(N, F)` where F means the dimension of input features.\n# - Outputs: scalar by default. 
If :attr:`reduction` is ``'none'``, then :math:`(N, )`.\n#\n# Examples::\n#\n# >>> from dalib.modules.domain_discriminator import DomainDiscriminator\n# >>> discriminator = DomainDiscriminator(in_feature=1024, hidden_size=1024)\n# >>> loss = DomainAdversarialLoss(discriminator, reduction='mean')\n# >>> # features from source domain and target domain\n# >>> f_s, f_t = torch.randn(20, 1024), torch.randn(20, 1024)\n# >>> # If you want to assign different weights to each instance, you should pass in w_s and w_t\n# >>> w_s, w_t = torch.randn(20), torch.randn(20)\n# >>> output = loss(f_s, f_t, w_s, w_t)\n# \"\"\"\n#\n# def __init__(self, domain_discriminator: nn.Module, num_domains: int, reduction: Optional[str] = 'mean',\n# grl: Optional = None):\n# super(MultiSourceDomainAdversarialLoss, self).__init__()\n# self.grl = WarmStartGradientReverseLayer(alpha=1., lo=0., hi=1., max_iters=1000, auto_step=True) if grl is None else grl\n# self.domain_discriminator = domain_discriminator\n# self.ce = lambda input, target, weight: \\\n# F.cross_entropy(input, target, weight=weight, reduction=reduction)\n# self.domain_discriminator_accuracy = None\n# self.num_domains = num_domains\n#\n# def forward(self, f_s: torch.Tensor, f_t: torch.Tensor,\n# w_s: Optional[torch.Tensor] = None, w_t: Optional[torch.Tensor] = None) -> torch.Tensor:\n# f = self.grl(torch.cat((f_s, f_t), dim=0))\n# d = self.domain_discriminator(f)\n# ds = d.chunk(self.num_domains, dim=0)\n#\n# for\n# d_label_s = torch.ones((f_s.size(0), 1)).to(f_s.device)\n# d_label_t = torch.zeros((f_t.size(0), 1)).to(f_t.device)\n#\n#\n# self.domain_discriminator_accuracy = 0.5 * (binary_accuracy(d_s, d_label_s) + binary_accuracy(d_t, d_label_t))\n#\n# if w_s is None:\n# w_s = torch.ones_like(d_label_s)\n# if w_t is None:\n# w_t = torch.ones_like(d_label_t)\n# return 0.5 * (self.ce(d_s, d_label_s, w_s.view_as(d_s)) + self.ce(d_t, d_label_t, w_t.view_as(d_t)))\n#\nclass ImageClassifier(ClassifierBase):\n def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_dim: Optional[int] = 256, **kwargs):\n bottleneck = nn.Sequential(\n nn.AdaptiveAvgPool2d(output_size=(1, 1)),\n nn.Flatten(),\n nn.Linear(backbone.out_features, bottleneck_dim),\n nn.BatchNorm1d(bottleneck_dim),\n nn.ReLU()\n )\n super(ImageClassifier, self).__init__(backbone, num_classes, bottleneck, bottleneck_dim, **kwargs)\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.ones_like",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.binary_cross_entropy",
"torch.nn.Flatten"
]
]
|
mitaki28/onnx-chainer | [
"845aa6c168d912ce044183c6dff6f21ce498d17c"
]
| [
"tests/functions_tests/test_activations.py"
]
| [
"import unittest\n\nimport numpy as np\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport onnx\nimport onnx_chainer\nfrom chainer import testing\nfrom onnx_chainer.testing import test_onnxruntime\n\n\[email protected](\n {'name': 'clipped_relu'},\n {'name': 'elu'},\n {'name': 'hard_sigmoid'},\n {'name': 'leaky_relu'},\n {'name': 'log_softmax'},\n {'name': 'relu'},\n {'name': 'sigmoid'},\n {'name': 'softmax'},\n {'name': 'softplus'},\n {'name': 'tanh'},\n)\nclass TestActivations(unittest.TestCase):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self, ops):\n super(Model, self).__init__()\n self.ops = ops\n\n def __call__(self, x):\n return self.ops(x)\n\n ops = getattr(F, self.name)\n self.model = Model(ops)\n self.x = np.random.randn(1, 5).astype(np.float32)\n self.fn = self.name + '.onnx'\n\n def test_output(self):\n for opset_version in range(\n test_onnxruntime.MINIMUM_OPSET_VERSION,\n onnx.defs.onnx_opset_version() + 1):\n test_onnxruntime.check_output(\n self.model, self.x, self.fn, opset_version=opset_version)\n\n\nclass TestPReLU(unittest.TestCase):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self):\n super(Model, self).__init__()\n with self.init_scope():\n self.prelu = L.PReLU()\n\n def __call__(self, x):\n return self.prelu(x)\n\n self.model = Model()\n self.x = np.zeros((1, 5), dtype=np.float32)\n self.fn = 'PReLU.onnx'\n\n def test_output(self):\n for opset_version in range(\n test_onnxruntime.MINIMUM_OPSET_VERSION,\n onnx.defs.onnx_opset_version() + 1):\n test_onnxruntime.check_output(\n self.model, self.x, self.fn, opset_version=opset_version)\n"
]
| [
[
"numpy.random.randn",
"numpy.zeros"
]
]
|
Ahiganbana/PCLSTM_implement | [
"54ce2d61312700a1ea58c435e4e5e827d847b148"
]
| [
"train_model.py"
]
| [
"import os\nimport sys\nimport torch\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport torch.nn.utils as utils\nimport torch.optim.lr_scheduler as lr_scheduler\nimport shutil\nimport random\nseed_num = 223\ntorch.manual_seed(seed_num)\nrandom.seed(seed_num)\n\n\ndef train(train_iter, dev_iter, test_iter, model, args):\n if args.cuda:\n model.cuda()\n\n if args.Adam is True:\n print(\"Adam Training......\")\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)\n elif args.SGD is True:\n print(\"SGD Training.......\")\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay,\n momentum=args.momentum_value)\n elif args.Adadelta is True:\n print(\"Adadelta Training.......\")\n optimizer = torch.optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)\n\n steps = 0\n epoch_step = 0\n model_count = 0\n best_accuracy = Best_Result()\n model.train()\n for epoch in range(1, args.epochs+1):\n steps = 0\n corrects_ = 0\n accuracy_ = 0\n loss_ = 0\n print(\"\\n## The {} Epoch, All {} Epochs ! ##\".format(epoch, args.epochs))\n for batch in train_iter:\n feature, target = batch\n if args.cuda:\n feature, target = feature.cuda(), target.cuda()\n\n optimizer.zero_grad()\n # model.zero_grad()\n\n logit = model(feature)\n # print(logit.shape)\n \n # print(target.shape)\n loss = F.cross_entropy(logit, target.long())\n loss.backward()\n loss_ += loss\n if args.init_clip_max_norm is not None:\n utils.clip_grad_norm_(model.parameters(), max_norm=args.init_clip_max_norm)\n optimizer.step()\n\n # c = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n # print(c)\n\n steps += 1\n if steps % args.log_interval == 0:\n train_size = len(train_iter.dataset)\n corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n accuracy = float(corrects)/args.batch_size * 100.0\n sys.stdout.write(\n '\\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps,\n train_size,\n loss.item(),\n accuracy,\n corrects,\n args.batch_size))\n corrects_ += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n size = len(train_iter.dataset)\n accuracy_ = float(corrects_) / size * 100.0\n loss_ = float(loss_) / size\n sys.stdout.write(\n '\\rEpoch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(epoch,\n args.epochs,\n loss_,\n accuracy_,\n corrects_,\n size))\n\n print(\"\\nDev Accuracy: \", end=\"\")\n eval(dev_iter, model, args, best_accuracy, epoch, optimizer, test=False)\n print(\"Test Accuracy: \", end=\"\")\n eval(test_iter, model, args, best_accuracy, epoch, optimizer, test=True)\n if steps % args.save_interval == 0:\n if not os.path.isdir(args.save_dir):\n os.makedirs(args.save_dir)\n save_prefix = os.path.join(args.save_dir, 'snapshot')\n save_path = '{}_steps{}.pt'.format(save_prefix, steps)\n torch.save(model.state_dict(), save_path)\n if os.path.isfile(save_path) and args.rm_model is True:\n os.remove(save_path)\n model_count += 1\n return model_count\n\n\ndef eval(data_iter, model, args, best_accuracy, epoch, optimizer, test=False):\n model.eval()\n corrects, avg_loss = 0, 0\n for batch in data_iter:\n feature, target = batch\n if args.cuda:\n feature, target = feature.cuda(), target.cuda()\n\n logit = model(feature)\n loss = F.cross_entropy(logit, target.long())\n avg_loss += loss.item()\n corrects += (torch.max(logit, 1)[1].view(target.size()) == target).sum()\n # print('predict value: ', torch.max(logit, 
1)[1].view(target.size()))\n # print('true value: ', target)\n # import pdb; pdb.set_trace()\n\n size = len(data_iter.dataset)\n avg_loss = avg_loss/size\n accuracy = 100.0 * float(corrects)/size\n model.train()\n print(' Evaluation - loss: {:.6f} acc: {:.4f}%({}/{})'.format(avg_loss, accuracy, corrects, size))\n if test is False:\n if accuracy >= best_accuracy.best_dev_accuracy:\n best_accuracy.best_dev_accuracy = accuracy\n best_accuracy.best_epoch = epoch\n best_accuracy.best_test = True\n state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}\n torch.save(state, './result/modelpara.pth')\n if test is True and best_accuracy.best_test is True:\n best_accuracy.accuracy = accuracy\n\n if test is True:\n print(\"The Current Best Dev Accuracy: {:.4f}, and Test Accuracy is :{:.4f}, locate on {} epoch.\\n\".format(\n best_accuracy.best_dev_accuracy, best_accuracy.accuracy, best_accuracy.best_epoch))\n if test is True:\n best_accuracy.best_test = False\n\n\nclass Best_Result:\n def __init__(self):\n self.best_dev_accuracy = -1\n self.best_accuracy = -1\n self.best_epoch = 1\n self.best_test = False\n self.accuracy = -1"
]
| [
[
"torch.manual_seed",
"torch.save",
"torch.max"
]
]
|
SP12893678/keras-contrib | [
"88e1ea27896fc4d42952b243dad3841b9e449554"
]
| [
"keras_contrib/utils/test_utils.py"
]
| [
"\"\"\"Utilities related to Keras unit tests.\"\"\"\nimport sys\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport inspect\n\nimport keras\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras import backend as K\n\n\ndef get_test_data(num_train=1000, num_test=500, input_shape=(10,),\n output_shape=(2,),\n classification=True, num_classes=2):\n \"\"\"Generates test data to train a model on.\n\n classification=True overrides output_shape\n (i.e. output_shape is set to (1,)) and the output\n consists in integers in [0, num_class-1].\n\n Otherwise: float output with shape output_shape.\n \"\"\"\n samples = num_train + num_test\n if classification:\n y = np.random.randint(0, num_classes, size=(samples,))\n X = np.zeros((samples,) + input_shape)\n for i in range(samples):\n X[i] = np.random.normal(loc=y[i], scale=0.7, size=input_shape)\n else:\n y_loc = np.random.random((samples,))\n X = np.zeros((samples,) + input_shape)\n y = np.zeros((samples,) + output_shape)\n for i in range(samples):\n X[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=input_shape)\n y[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=output_shape)\n\n return (X[:num_train], y[:num_train]), (X[num_train:], y[num_train:])\n\n\ndef layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None,\n input_data=None, expected_output=None,\n expected_output_dtype=None, fixed_batch_size=False):\n \"\"\"Test routine for a layer with a single input tensor\n and single output tensor.\n\n Copy of the function in keras-team/keras because it's not in the public API.\n If we use the one from keras-team/keras it won't work with tf.keras.\n \"\"\"\n # generate input data\n if input_data is None:\n assert input_shape\n if not input_dtype:\n input_dtype = K.floatx()\n input_data_shape = list(input_shape)\n for i, e in enumerate(input_data_shape):\n if e is None:\n input_data_shape[i] = np.random.randint(1, 4)\n input_data = (10 * np.random.random(input_data_shape))\n input_data = input_data.astype(input_dtype)\n else:\n if input_shape is None:\n input_shape = input_data.shape\n if input_dtype is None:\n input_dtype = input_data.dtype\n if expected_output_dtype is None:\n expected_output_dtype = input_dtype\n\n # instantiation\n layer = layer_cls(**kwargs)\n\n # test get_weights , set_weights at layer level\n weights = layer.get_weights()\n layer.set_weights(weights)\n\n expected_output_shape = layer.compute_output_shape(input_shape)\n\n # test in functional API\n if fixed_batch_size:\n x = Input(batch_shape=input_shape, dtype=input_dtype)\n else:\n x = Input(shape=input_shape[1:], dtype=input_dtype)\n y = layer(x)\n assert K.dtype(y) == expected_output_dtype\n\n # check with the functional API\n model = Model(x, y)\n\n actual_output = model.predict(input_data)\n actual_output_shape = actual_output.shape\n for expected_dim, actual_dim in zip(expected_output_shape,\n actual_output_shape):\n if expected_dim is not None:\n assert expected_dim == actual_dim\n\n if expected_output is not None:\n assert_allclose(actual_output, expected_output, rtol=1e-3)\n\n # test serialization, weight setting at model level\n model_config = model.get_config()\n custom_objects = {layer.__class__.__name__: layer.__class__}\n recovered_model = model.__class__.from_config(model_config, custom_objects)\n if model.weights:\n weights = model.get_weights()\n recovered_model.set_weights(weights)\n _output = recovered_model.predict(input_data)\n assert_allclose(_output, actual_output, rtol=1e-3)\n\n # test training mode 
(e.g. useful when the layer has a\n # different behavior at training and testing time).\n if has_arg(layer.call, 'training'):\n model.compile('rmsprop', 'mse')\n model.train_on_batch(input_data, actual_output)\n\n # test instantiation from layer config\n layer_config = layer.get_config()\n layer_config['batch_input_shape'] = input_shape\n layer = layer.__class__.from_config(layer_config)\n\n # for further checks in the caller function\n return actual_output\n\n\ndef has_arg(fn, name, accept_all=False):\n \"\"\"Checks if a callable accepts a given keyword argument.\n\n For Python 2, checks if there is an argument with the given name.\n\n For Python 3, checks if there is an argument with the given name, and\n also whether this argument can be called with a keyword (i.e. if it is\n not a positional-only argument).\n\n This function is a copy of the one in keras-team/keras because it's not\n in the public API.\n\n # Arguments\n fn: Callable to inspect.\n name: Check if `fn` can be called with `name` as a keyword argument.\n accept_all: What to return if there is no parameter called `name`\n but the function accepts a `**kwargs` argument.\n\n # Returns\n bool, whether `fn` accepts a `name` keyword argument.\n \"\"\"\n if sys.version_info < (3,):\n arg_spec = inspect.getargspec(fn)\n if accept_all and arg_spec.keywords is not None:\n return True\n return name in arg_spec.args\n elif sys.version_info < (3, 3):\n arg_spec = inspect.getfullargspec(fn)\n if accept_all and arg_spec.varkw is not None:\n return True\n return (name in arg_spec.args or\n name in arg_spec.kwonlyargs)\n else:\n signature = inspect.signature(fn)\n parameter = signature.parameters.get(name)\n if parameter is None:\n if accept_all:\n for param in signature.parameters.values():\n if param.kind == inspect.Parameter.VAR_KEYWORD:\n return True\n return False\n return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,\n inspect.Parameter.KEYWORD_ONLY))\n\n\ndef to_list(x, allow_tuple=False):\n if isinstance(x, list):\n return x\n if allow_tuple and isinstance(x, tuple):\n return list(x)\n return [x]\n\n\ndef unpack_singleton(x):\n if len(x) == 1:\n return x[0]\n return x\n\n\nif keras.__name__ == 'keras':\n is_tf_keras = False\nelif 'tensorflow' in keras.__name__:\n is_tf_keras = True\nelse:\n raise KeyError('Cannot detect if using keras or tf.keras.')\n\n\ndef to_tuple(shape):\n \"\"\"This functions is here to fix an inconsistency between keras and tf.keras.\n\n In tf.keras, the input_shape argument is an tuple with `Dimensions` objects.\n In keras, the input_shape is a simple tuple of ints or `None`.\n\n We'll work with tuples of ints or `None` to be consistent\n with keras-team/keras. So we must apply this function to\n all input_shapes of the build methods in custom layers.\n \"\"\"\n if is_tf_keras:\n import tensorflow as tf\n return tuple(tf.TensorShape(shape).as_list())\n else:\n return shape\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.random.normal",
"numpy.zeros",
"tensorflow.TensorShape",
"numpy.random.randint",
"numpy.random.random"
]
]
|
EOMYS-Public/SciDataTool | [
"dd74504fbed8e4071582981ff8736c37f1c25bdc"
]
| [
"SciDataTool/Classes/RequestedAxis.py"
]
| [
"# -*- coding: utf-8 -*-\r\n# File generated according to Generator/ClassesRef/RequestedAxis.csv\r\n# WARNING! All changes made in this file will be lost!\r\n\"\"\"Method code available at https://github.com/Eomys/SciDataTool/tree/master/SciDataTool/Methods//RequestedAxis\r\n\"\"\"\r\n\r\nfrom os import linesep\r\nfrom sys import getsizeof\r\nfrom ._check import set_array, check_var, raise_\r\nfrom ..Functions.save import save\r\nfrom ..Functions.copy import copy\r\nfrom ..Functions.load import load_init_dict\r\nfrom ..Functions.Load.import_class import import_class\r\nfrom ._frozen import FrozenClass\r\n\r\n# Import all class method\r\n# Try/catch to remove unnecessary dependencies in unused method\r\ntry:\r\n from ..Methods.RequestedAxis.get_axis import get_axis\r\nexcept ImportError as error:\r\n get_axis = error\r\n\r\n\r\nfrom numpy import array, array_equal\r\nfrom ._check import InitUnKnowClassError\r\n\r\n\r\nclass RequestedAxis(FrozenClass):\r\n \"\"\"Class to handle requested axes during get_along methods\"\"\"\r\n\r\n VERSION = 1\r\n\r\n # cf Methods.RequestedAxis.get_axis\r\n if isinstance(get_axis, ImportError):\r\n get_axis = property(\r\n fget=lambda x: raise_(\r\n ImportError(\"Can't use RequestedAxis method get_axis: \" + str(get_axis))\r\n )\r\n )\r\n else:\r\n get_axis = get_axis\r\n # save and copy methods are available in all object\r\n save = save\r\n copy = copy\r\n\r\n def __init__(\r\n self,\r\n name=\"\",\r\n corr_name=\"\",\r\n unit=\"\",\r\n corr_unit=\"\",\r\n extension=\"\",\r\n values=None,\r\n indices=None,\r\n input_data=None,\r\n operation=None,\r\n index=None,\r\n transform=None,\r\n is_pattern=False,\r\n rebuild_indices=None,\r\n is_step=False,\r\n noct=None,\r\n corr_values=None,\r\n is_components=False,\r\n init_dict=None,\r\n init_str=None,\r\n ):\r\n \"\"\"Constructor of the class. 
Can be use in three ways :\r\n - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values\r\n for SciDataTool type, -1 will call the default constructor\r\n - __init__ (init_dict = d) d must be a dictionary with property names as keys\r\n - __init__ (init_str = s) s must be a string\r\n s is the file path to load\r\n\r\n ndarray or list can be given for Vector and Matrix\r\n object or dict can be given for SciDataTool Object\"\"\"\r\n\r\n if init_str is not None: # Load from a file\r\n init_dict = load_init_dict(init_str)[1]\r\n if init_dict is not None: # Initialisation by dict\r\n assert type(init_dict) is dict\r\n # Overwrite default value with init_dict content\r\n if \"name\" in list(init_dict.keys()):\r\n name = init_dict[\"name\"]\r\n if \"corr_name\" in list(init_dict.keys()):\r\n corr_name = init_dict[\"corr_name\"]\r\n if \"unit\" in list(init_dict.keys()):\r\n unit = init_dict[\"unit\"]\r\n if \"corr_unit\" in list(init_dict.keys()):\r\n corr_unit = init_dict[\"corr_unit\"]\r\n if \"extension\" in list(init_dict.keys()):\r\n extension = init_dict[\"extension\"]\r\n if \"values\" in list(init_dict.keys()):\r\n values = init_dict[\"values\"]\r\n if \"indices\" in list(init_dict.keys()):\r\n indices = init_dict[\"indices\"]\r\n if \"input_data\" in list(init_dict.keys()):\r\n input_data = init_dict[\"input_data\"]\r\n if \"operation\" in list(init_dict.keys()):\r\n operation = init_dict[\"operation\"]\r\n if \"index\" in list(init_dict.keys()):\r\n index = init_dict[\"index\"]\r\n if \"transform\" in list(init_dict.keys()):\r\n transform = init_dict[\"transform\"]\r\n if \"is_pattern\" in list(init_dict.keys()):\r\n is_pattern = init_dict[\"is_pattern\"]\r\n if \"rebuild_indices\" in list(init_dict.keys()):\r\n rebuild_indices = init_dict[\"rebuild_indices\"]\r\n if \"is_step\" in list(init_dict.keys()):\r\n is_step = init_dict[\"is_step\"]\r\n if \"noct\" in list(init_dict.keys()):\r\n noct = init_dict[\"noct\"]\r\n if \"corr_values\" in list(init_dict.keys()):\r\n corr_values = init_dict[\"corr_values\"]\r\n if \"is_components\" in list(init_dict.keys()):\r\n is_components = init_dict[\"is_components\"]\r\n # Set the properties (value check and convertion are done in setter)\r\n self.parent = None\r\n self.name = name\r\n self.corr_name = corr_name\r\n self.unit = unit\r\n self.corr_unit = corr_unit\r\n self.extension = extension\r\n self.values = values\r\n self.indices = indices\r\n self.input_data = input_data\r\n self.operation = operation\r\n self.index = index\r\n self.transform = transform\r\n self.is_pattern = is_pattern\r\n self.rebuild_indices = rebuild_indices\r\n self.is_step = is_step\r\n self.noct = noct\r\n self.corr_values = corr_values\r\n self.is_components = is_components\r\n\r\n # The class is frozen, for now it's impossible to add new properties\r\n self._freeze()\r\n\r\n def __str__(self):\r\n \"\"\"Convert this object in a readeable string (for print)\"\"\"\r\n\r\n RequestedAxis_str = \"\"\r\n if self.parent is None:\r\n RequestedAxis_str += \"parent = None \" + linesep\r\n else:\r\n RequestedAxis_str += (\r\n \"parent = \" + str(type(self.parent)) + \" object\" + linesep\r\n )\r\n RequestedAxis_str += 'name = \"' + str(self.name) + '\"' + linesep\r\n RequestedAxis_str += 'corr_name = \"' + str(self.corr_name) + '\"' + linesep\r\n RequestedAxis_str += 'unit = \"' + str(self.unit) + '\"' + linesep\r\n RequestedAxis_str += 'corr_unit = \"' + str(self.corr_unit) + '\"' + linesep\r\n RequestedAxis_str += 'extension = \"' + str(self.extension) 
+ '\"' + linesep\r\n RequestedAxis_str += (\r\n \"values = \"\r\n + linesep\r\n + str(self.values).replace(linesep, linesep + \"\\t\")\r\n + linesep\r\n + linesep\r\n )\r\n RequestedAxis_str += (\r\n \"indices = \"\r\n + linesep\r\n + str(self.indices).replace(linesep, linesep + \"\\t\")\r\n + linesep\r\n )\r\n RequestedAxis_str += (\r\n \"input_data = \"\r\n + linesep\r\n + str(self.input_data).replace(linesep, linesep + \"\\t\")\r\n + linesep\r\n + linesep\r\n )\r\n RequestedAxis_str += 'operation = \"' + str(self.operation) + '\"' + linesep\r\n RequestedAxis_str += \"index = \" + str(self.index) + linesep\r\n RequestedAxis_str += 'transform = \"' + str(self.transform) + '\"' + linesep\r\n RequestedAxis_str += \"is_pattern = \" + str(self.is_pattern) + linesep\r\n RequestedAxis_str += (\r\n \"rebuild_indices = \"\r\n + linesep\r\n + str(self.rebuild_indices).replace(linesep, linesep + \"\\t\")\r\n + linesep\r\n + linesep\r\n )\r\n RequestedAxis_str += \"is_step = \" + str(self.is_step) + linesep\r\n RequestedAxis_str += \"noct = \" + str(self.noct) + linesep\r\n RequestedAxis_str += (\r\n \"corr_values = \"\r\n + linesep\r\n + str(self.corr_values).replace(linesep, linesep + \"\\t\")\r\n + linesep\r\n + linesep\r\n )\r\n RequestedAxis_str += \"is_components = \" + str(self.is_components) + linesep\r\n return RequestedAxis_str\r\n\r\n def __eq__(self, other):\r\n \"\"\"Compare two objects (skip parent)\"\"\"\r\n\r\n if type(other) != type(self):\r\n return False\r\n if other.name != self.name:\r\n return False\r\n if other.corr_name != self.corr_name:\r\n return False\r\n if other.unit != self.unit:\r\n return False\r\n if other.corr_unit != self.corr_unit:\r\n return False\r\n if other.extension != self.extension:\r\n return False\r\n if not array_equal(other.values, self.values):\r\n return False\r\n if other.indices != self.indices:\r\n return False\r\n if not array_equal(other.input_data, self.input_data):\r\n return False\r\n if other.operation != self.operation:\r\n return False\r\n if other.index != self.index:\r\n return False\r\n if other.transform != self.transform:\r\n return False\r\n if other.is_pattern != self.is_pattern:\r\n return False\r\n if not array_equal(other.rebuild_indices, self.rebuild_indices):\r\n return False\r\n if other.is_step != self.is_step:\r\n return False\r\n if other.noct != self.noct:\r\n return False\r\n if not array_equal(other.corr_values, self.corr_values):\r\n return False\r\n if other.is_components != self.is_components:\r\n return False\r\n return True\r\n\r\n def compare(self, other, name=\"self\", ignore_list=None):\r\n \"\"\"Compare two objects and return list of differences\"\"\"\r\n\r\n if ignore_list is None:\r\n ignore_list = list()\r\n if type(other) != type(self):\r\n return [\"type(\" + name + \")\"]\r\n diff_list = list()\r\n if other._name != self._name:\r\n diff_list.append(name + \".name\")\r\n if other._corr_name != self._corr_name:\r\n diff_list.append(name + \".corr_name\")\r\n if other._unit != self._unit:\r\n diff_list.append(name + \".unit\")\r\n if other._corr_unit != self._corr_unit:\r\n diff_list.append(name + \".corr_unit\")\r\n if other._extension != self._extension:\r\n diff_list.append(name + \".extension\")\r\n if not array_equal(other.values, self.values):\r\n diff_list.append(name + \".values\")\r\n if other._indices != self._indices:\r\n diff_list.append(name + \".indices\")\r\n if not array_equal(other.input_data, self.input_data):\r\n diff_list.append(name + \".input_data\")\r\n if other._operation != 
self._operation:\r\n diff_list.append(name + \".operation\")\r\n if other._index != self._index:\r\n diff_list.append(name + \".index\")\r\n if other._transform != self._transform:\r\n diff_list.append(name + \".transform\")\r\n if other._is_pattern != self._is_pattern:\r\n diff_list.append(name + \".is_pattern\")\r\n if not array_equal(other.rebuild_indices, self.rebuild_indices):\r\n diff_list.append(name + \".rebuild_indices\")\r\n if other._is_step != self._is_step:\r\n diff_list.append(name + \".is_step\")\r\n if other._noct != self._noct:\r\n diff_list.append(name + \".noct\")\r\n if not array_equal(other.corr_values, self.corr_values):\r\n diff_list.append(name + \".corr_values\")\r\n if other._is_components != self._is_components:\r\n diff_list.append(name + \".is_components\")\r\n # Filter ignore differences\r\n diff_list = list(filter(lambda x: x not in ignore_list, diff_list))\r\n return diff_list\r\n\r\n def __sizeof__(self):\r\n \"\"\"Return the size in memory of the object (including all subobject)\"\"\"\r\n\r\n S = 0 # Full size of the object\r\n S += getsizeof(self.name)\r\n S += getsizeof(self.corr_name)\r\n S += getsizeof(self.unit)\r\n S += getsizeof(self.corr_unit)\r\n S += getsizeof(self.extension)\r\n S += getsizeof(self.values)\r\n if self.indices is not None:\r\n for value in self.indices:\r\n S += getsizeof(value)\r\n S += getsizeof(self.input_data)\r\n S += getsizeof(self.operation)\r\n S += getsizeof(self.index)\r\n S += getsizeof(self.transform)\r\n S += getsizeof(self.is_pattern)\r\n S += getsizeof(self.rebuild_indices)\r\n S += getsizeof(self.is_step)\r\n S += getsizeof(self.noct)\r\n S += getsizeof(self.corr_values)\r\n S += getsizeof(self.is_components)\r\n return S\r\n\r\n def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):\r\n \"\"\"\r\n Convert this object in a json serializable dict (can be use in __init__).\r\n type_handle_ndarray: int\r\n How to handle ndarray (0: tolist, 1: copy, 2: nothing)\r\n keep_function : bool\r\n True to keep the function object, else return str\r\n Optional keyword input parameter is for internal use only\r\n and may prevent json serializability.\r\n \"\"\"\r\n\r\n RequestedAxis_dict = dict()\r\n RequestedAxis_dict[\"name\"] = self.name\r\n RequestedAxis_dict[\"corr_name\"] = self.corr_name\r\n RequestedAxis_dict[\"unit\"] = self.unit\r\n RequestedAxis_dict[\"corr_unit\"] = self.corr_unit\r\n RequestedAxis_dict[\"extension\"] = self.extension\r\n if self.values is None:\r\n RequestedAxis_dict[\"values\"] = None\r\n else:\r\n if type_handle_ndarray == 0:\r\n RequestedAxis_dict[\"values\"] = self.values.tolist()\r\n elif type_handle_ndarray == 1:\r\n RequestedAxis_dict[\"values\"] = self.values.copy()\r\n elif type_handle_ndarray == 2:\r\n RequestedAxis_dict[\"values\"] = self.values\r\n else:\r\n raise Exception(\r\n \"Unknown type_handle_ndarray: \" + str(type_handle_ndarray)\r\n )\r\n RequestedAxis_dict[\"indices\"] = (\r\n self.indices.copy() if self.indices is not None else None\r\n )\r\n if self.input_data is None:\r\n RequestedAxis_dict[\"input_data\"] = None\r\n else:\r\n if type_handle_ndarray == 0:\r\n RequestedAxis_dict[\"input_data\"] = self.input_data.tolist()\r\n elif type_handle_ndarray == 1:\r\n RequestedAxis_dict[\"input_data\"] = self.input_data.copy()\r\n elif type_handle_ndarray == 2:\r\n RequestedAxis_dict[\"input_data\"] = self.input_data\r\n else:\r\n raise Exception(\r\n \"Unknown type_handle_ndarray: \" + str(type_handle_ndarray)\r\n )\r\n RequestedAxis_dict[\"operation\"] = 
self.operation\r\n RequestedAxis_dict[\"index\"] = self.index\r\n RequestedAxis_dict[\"transform\"] = self.transform\r\n RequestedAxis_dict[\"is_pattern\"] = self.is_pattern\r\n if self.rebuild_indices is None:\r\n RequestedAxis_dict[\"rebuild_indices\"] = None\r\n else:\r\n if type_handle_ndarray == 0:\r\n RequestedAxis_dict[\"rebuild_indices\"] = self.rebuild_indices.tolist()\r\n elif type_handle_ndarray == 1:\r\n RequestedAxis_dict[\"rebuild_indices\"] = self.rebuild_indices.copy()\r\n elif type_handle_ndarray == 2:\r\n RequestedAxis_dict[\"rebuild_indices\"] = self.rebuild_indices\r\n else:\r\n raise Exception(\r\n \"Unknown type_handle_ndarray: \" + str(type_handle_ndarray)\r\n )\r\n RequestedAxis_dict[\"is_step\"] = self.is_step\r\n RequestedAxis_dict[\"noct\"] = self.noct\r\n if self.corr_values is None:\r\n RequestedAxis_dict[\"corr_values\"] = None\r\n else:\r\n if type_handle_ndarray == 0:\r\n RequestedAxis_dict[\"corr_values\"] = self.corr_values.tolist()\r\n elif type_handle_ndarray == 1:\r\n RequestedAxis_dict[\"corr_values\"] = self.corr_values.copy()\r\n elif type_handle_ndarray == 2:\r\n RequestedAxis_dict[\"corr_values\"] = self.corr_values\r\n else:\r\n raise Exception(\r\n \"Unknown type_handle_ndarray: \" + str(type_handle_ndarray)\r\n )\r\n RequestedAxis_dict[\"is_components\"] = self.is_components\r\n # The class name is added to the dict for deserialisation purpose\r\n RequestedAxis_dict[\"__class__\"] = \"RequestedAxis\"\r\n return RequestedAxis_dict\r\n\r\n def _set_None(self):\r\n \"\"\"Set all the properties to None (except SciDataTool object)\"\"\"\r\n\r\n self.name = None\r\n self.corr_name = None\r\n self.unit = None\r\n self.corr_unit = None\r\n self.extension = None\r\n self.values = None\r\n self.indices = None\r\n self.input_data = None\r\n self.operation = None\r\n self.index = None\r\n self.transform = None\r\n self.is_pattern = None\r\n self.rebuild_indices = None\r\n self.is_step = None\r\n self.noct = None\r\n self.corr_values = None\r\n self.is_components = None\r\n\r\n def _get_name(self):\r\n \"\"\"getter of name\"\"\"\r\n return self._name\r\n\r\n def _set_name(self, value):\r\n \"\"\"setter of name\"\"\"\r\n check_var(\"name\", value, \"str\")\r\n self._name = value\r\n\r\n name = property(\r\n fget=_get_name,\r\n fset=_set_name,\r\n doc=u\"\"\"Name requested in get_along command\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_corr_name(self):\r\n \"\"\"getter of corr_name\"\"\"\r\n return self._corr_name\r\n\r\n def _set_corr_name(self, value):\r\n \"\"\"setter of corr_name\"\"\"\r\n check_var(\"corr_name\", value, \"str\")\r\n self._corr_name = value\r\n\r\n corr_name = property(\r\n fget=_get_corr_name,\r\n fset=_set_corr_name,\r\n doc=u\"\"\"Corresponding axis if a transform is required\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_unit(self):\r\n \"\"\"getter of unit\"\"\"\r\n return self._unit\r\n\r\n def _set_unit(self, value):\r\n \"\"\"setter of unit\"\"\"\r\n check_var(\"unit\", value, \"str\")\r\n self._unit = value\r\n\r\n unit = property(\r\n fget=_get_unit,\r\n fset=_set_unit,\r\n doc=u\"\"\"Unit requested in get_along command\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_corr_unit(self):\r\n \"\"\"getter of corr_unit\"\"\"\r\n return self._corr_unit\r\n\r\n def _set_corr_unit(self, value):\r\n \"\"\"setter of corr_unit\"\"\"\r\n check_var(\"corr_unit\", value, \"str\")\r\n self._corr_unit = value\r\n\r\n corr_unit = property(\r\n fget=_get_corr_unit,\r\n fset=_set_corr_unit,\r\n doc=u\"\"\"Corresponding unit 
if a transform is required\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_extension(self):\r\n \"\"\"getter of extension\"\"\"\r\n return self._extension\r\n\r\n def _set_extension(self, value):\r\n \"\"\"setter of extension\"\"\"\r\n check_var(\"extension\", value, \"str\")\r\n self._extension = value\r\n\r\n extension = property(\r\n fget=_get_extension,\r\n fset=_set_extension,\r\n doc=u\"\"\"Extension of the requested axis (single or interval)\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_values(self):\r\n \"\"\"getter of values\"\"\"\r\n return self._values\r\n\r\n def _set_values(self, value):\r\n \"\"\"setter of values\"\"\"\r\n if type(value) is int and value == -1:\r\n value = array([])\r\n elif type(value) is list:\r\n try:\r\n value = array(value)\r\n except:\r\n pass\r\n check_var(\"values\", value, \"ndarray\")\r\n self._values = value\r\n\r\n values = property(\r\n fget=_get_values,\r\n fset=_set_values,\r\n doc=u\"\"\"Values of the axis\r\n\r\n :Type: ndarray\r\n \"\"\",\r\n )\r\n\r\n def _get_indices(self):\r\n \"\"\"getter of indices\"\"\"\r\n return self._indices\r\n\r\n def _set_indices(self, value):\r\n \"\"\"setter of indices\"\"\"\r\n if type(value) is int and value == -1:\r\n value = list()\r\n check_var(\"indices\", value, \"list\")\r\n self._indices = value\r\n\r\n indices = property(\r\n fget=_get_indices,\r\n fset=_set_indices,\r\n doc=u\"\"\"Indices of the axis\r\n\r\n :Type: list\r\n \"\"\",\r\n )\r\n\r\n def _get_input_data(self):\r\n \"\"\"getter of input_data\"\"\"\r\n return self._input_data\r\n\r\n def _set_input_data(self, value):\r\n \"\"\"setter of input_data\"\"\"\r\n if type(value) is int and value == -1:\r\n value = array([])\r\n elif type(value) is list:\r\n try:\r\n value = array(value)\r\n except:\r\n pass\r\n check_var(\"input_data\", value, \"ndarray\")\r\n self._input_data = value\r\n\r\n input_data = property(\r\n fget=_get_input_data,\r\n fset=_set_input_data,\r\n doc=u\"\"\"Input data to interpolate on\r\n\r\n :Type: ndarray\r\n \"\"\",\r\n )\r\n\r\n def _get_operation(self):\r\n \"\"\"getter of operation\"\"\"\r\n return self._operation\r\n\r\n def _set_operation(self, value):\r\n \"\"\"setter of operation\"\"\"\r\n check_var(\"operation\", value, \"str\")\r\n self._operation = value\r\n\r\n operation = property(\r\n fget=_get_operation,\r\n fset=_set_operation,\r\n doc=u\"\"\"Operation to perform on the axis (coordinate change, etc)\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_index(self):\r\n \"\"\"getter of index\"\"\"\r\n return self._index\r\n\r\n def _set_index(self, value):\r\n \"\"\"setter of index\"\"\"\r\n check_var(\"index\", value, \"int\")\r\n self._index = value\r\n\r\n index = property(\r\n fget=_get_index,\r\n fset=_set_index,\r\n doc=u\"\"\"Index of the axis in the axes list\r\n\r\n :Type: int\r\n \"\"\",\r\n )\r\n\r\n def _get_transform(self):\r\n \"\"\"getter of transform\"\"\"\r\n return self._transform\r\n\r\n def _set_transform(self, value):\r\n \"\"\"setter of transform\"\"\"\r\n check_var(\"transform\", value, \"str\")\r\n self._transform = value\r\n\r\n transform = property(\r\n fget=_get_transform,\r\n fset=_set_transform,\r\n doc=u\"\"\"Transform to perform on the axis (fft, ifft)\r\n\r\n :Type: str\r\n \"\"\",\r\n )\r\n\r\n def _get_is_pattern(self):\r\n \"\"\"getter of is_pattern\"\"\"\r\n return self._is_pattern\r\n\r\n def _set_is_pattern(self, value):\r\n \"\"\"setter of is_pattern\"\"\"\r\n check_var(\"is_pattern\", value, \"bool\")\r\n self._is_pattern = value\r\n\r\n is_pattern = 
property(\r\n fget=_get_is_pattern,\r\n fset=_set_is_pattern,\r\n doc=u\"\"\"To indicate if the axis is a DataPattern\r\n\r\n :Type: bool\r\n \"\"\",\r\n )\r\n\r\n def _get_rebuild_indices(self):\r\n \"\"\"getter of rebuild_indices\"\"\"\r\n return self._rebuild_indices\r\n\r\n def _set_rebuild_indices(self, value):\r\n \"\"\"setter of rebuild_indices\"\"\"\r\n if type(value) is int and value == -1:\r\n value = array([])\r\n elif type(value) is list:\r\n try:\r\n value = array(value)\r\n except:\r\n pass\r\n check_var(\"rebuild_indices\", value, \"ndarray\")\r\n self._rebuild_indices = value\r\n\r\n rebuild_indices = property(\r\n fget=_get_rebuild_indices,\r\n fset=_set_rebuild_indices,\r\n doc=u\"\"\"Indices to rebuild pattern\r\n\r\n :Type: ndarray\r\n \"\"\",\r\n )\r\n\r\n def _get_is_step(self):\r\n \"\"\"getter of is_step\"\"\"\r\n return self._is_step\r\n\r\n def _set_is_step(self, value):\r\n \"\"\"setter of is_step\"\"\"\r\n check_var(\"is_step\", value, \"bool\")\r\n self._is_step = value\r\n\r\n is_step = property(\r\n fget=_get_is_step,\r\n fset=_set_is_step,\r\n doc=u\"\"\"To indicate if the pattern axis is step (for interpolation)\r\n\r\n :Type: bool\r\n \"\"\",\r\n )\r\n\r\n def _get_noct(self):\r\n \"\"\"getter of noct\"\"\"\r\n return self._noct\r\n\r\n def _set_noct(self, value):\r\n \"\"\"setter of noct\"\"\"\r\n check_var(\"noct\", value, \"int\")\r\n self._noct = value\r\n\r\n noct = property(\r\n fget=_get_noct,\r\n fset=_set_noct,\r\n doc=u\"\"\"To store 1/nth octave band\r\n\r\n :Type: int\r\n \"\"\",\r\n )\r\n\r\n def _get_corr_values(self):\r\n \"\"\"getter of corr_values\"\"\"\r\n return self._corr_values\r\n\r\n def _set_corr_values(self, value):\r\n \"\"\"setter of corr_values\"\"\"\r\n if type(value) is int and value == -1:\r\n value = array([])\r\n elif type(value) is list:\r\n try:\r\n value = array(value)\r\n except:\r\n pass\r\n check_var(\"corr_values\", value, \"ndarray\")\r\n self._corr_values = value\r\n\r\n corr_values = property(\r\n fget=_get_corr_values,\r\n fset=_set_corr_values,\r\n doc=u\"\"\"To store original axis values (useful in case of non uniform fft)\r\n\r\n :Type: ndarray\r\n \"\"\",\r\n )\r\n\r\n def _get_is_components(self):\r\n \"\"\"getter of is_components\"\"\"\r\n return self._is_components\r\n\r\n def _set_is_components(self, value):\r\n \"\"\"setter of is_components\"\"\"\r\n check_var(\"is_components\", value, \"bool\")\r\n self._is_components = value\r\n\r\n is_components = property(\r\n fget=_get_is_components,\r\n fset=_set_is_components,\r\n doc=u\"\"\"To indicate if the values are strings\r\n\r\n :Type: bool\r\n \"\"\",\r\n )\r\n"
]
| [
[
"numpy.array",
"numpy.array_equal"
]
]
|
carlo-cor/DRONE-REPOSITORY | [
"dd09640da997c175bab8d73862a254cdb8537283"
]
| [
"ColorDetection.py"
]
| [
"import cv2\r\nimport numpy as np\r\nimport numpy as npObj\r\n\r\nwidth = 640\r\nheight = 480\r\nwindow = cv2.VideoCapture(0)\r\nwindow.set(3, width)\r\nwindow.set(4, height)\r\n\r\ndef empty(a):\r\n pass\r\n\r\ncv2.namedWindow(\"HSV\")\r\ncv2.resizeWindow(\"HSV\", 640, 240)\r\ncv2.createTrackbar(\"HUE MIN TRACKBAR\", \"HSV\", 0, 179, empty)\r\ncv2.createTrackbar(\"HUE MAX TRACKBAR\", \"HSV\", 179, 179, empty)\r\ncv2.createTrackbar(\"SAT MIN TRACKBAR\", \"HSV\", 0, 255, empty)\r\ncv2.createTrackbar(\"SAT MAX TRACKBAR\", \"HSV\", 255, 255, empty)\r\ncv2.createTrackbar(\"VALUE MIN TRACKBAR\", \"HSV\", 0, 255, empty)\r\ncv2.createTrackbar(\"VALUE MAX TRACKBAR\", \"HSV\", 255, 255, empty)\r\n\r\nwhile True:\r\n _, img = window.read()\r\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n h_min = cv2.getTrackbarPos(\"HUE MIN TRACKBAR\", \"HSV\")\r\n h_max = cv2.getTrackbarPos(\"HUE MAX TRACKBAR\", \"HSV\")\r\n s_min = cv2.getTrackbarPos(\"SAT MIN TRACKBAR\", \"HSV\")\r\n s_max = cv2.getTrackbarPos(\"SAT MAX TRACKBAR\", \"HSV\")\r\n v_min = cv2.getTrackbarPos(\"VALUE MIN TRACKBAR\", \"HSV\")\r\n v_max = cv2.getTrackbarPos(\"VALUE MAX TRACKBAR\", \"HSV\")\r\n print(h_min)\r\n\r\n minimums = np.array([h_min, s_min, v_min])\r\n maximums = np.array([h_max, s_max, v_max])\r\n mask = cv2.inRange(imgHSV, minimums, maximums)\r\n result = cv2.bitwise_and(img, img, mask = mask)\r\n\r\n mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n horizontalStck = np.hstack([img, mask, result])\r\n\r\n # cv2.imshow(\"ORIGINAL\", img)\r\n # cv2.imshow(\"HSV COLOR SPACE\", imgHSV)\r\n # cv2.imshow(\"MASK\", mask)\r\n # cv2.imshow(\"RESULTS\", result)\r\n cv2.imshow(\"HORIZONTAL STACK\", horizontalStck)\r\n\r\n if cv2.waitKey(1) and 0xFF == ord('q'):\r\n break"
]
| [
[
"numpy.hstack",
"numpy.array"
]
]
|
Deech08/whampy | [
"befc8ff0d9d13f397e1d79a793283be2a7fc1eb9"
]
| [
"whampy/tests/test_click_map.py"
]
| [
"import pytest\nimport matplotlib.pyplot as plt\nimport astropy.units as u\nimport numpy as np\nfrom ..skySurvey import SkySurvey \nfrom unittest.mock import Mock\n\n# Set up the random number generator.\nnp.random.seed(1234)\n\n# Load survey\nsurvey = SkySurvey()\n\nBASELINE_DIR = 'baseline'\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_event():\n\t\"\"\"\n\tTest click map with sample click event\n\t\"\"\"\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tclick_map = survey.click_map(fig = fig, image_ax = ax)\n\tevent = Mock()\n\tevent.button = 1\n\tevent.inaxes = ax\n\tevent.xdata = 30\n\tevent.ydata = 5\n\tclick_map.on_click(event)\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_event_outside():\n\t\"\"\"\n\tTest click map with sample click event\n\t\"\"\"\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tclick_map = survey.click_map(fig = fig, image_ax = ax)\n\tevent = Mock()\n\tevent.button = 1\n\tevent.inaxes = None\n\tevent.xdata = 30\n\tevent.ydata = 5\n\tclick_map.on_click(event)\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_no_fig():\n\t\"\"\"\n\tTest init without fig\n\t\"\"\"\n\tclick_map = survey.click_map()\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_no_imax():\n\t\"\"\"\n\tTest init without fig\n\t\"\"\"\n\tfig = plt.figure()\n\tclick_map = survey.click_map(fig = fig)\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_no_specax():\n\t\"\"\"\n\tTest init without fig\n\t\"\"\"\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tclick_map = survey.click_map(fig = fig, image_ax = ax)\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_projection():\n\timport cartopy.crs as ccrs\n\t\"\"\"\n\tTest init without fig\n\t\"\"\"\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection = ccrs.Mollweide())\n\tclick_map = survey.click_map(fig = fig, image_ax = ax)\n\tevent = Mock()\n\tevent.button = 1\n\tevent.inaxes = ax\n\tevent.xdata = 30\n\tevent.ydata = 5\n\tclick_map.on_click(event)\n\treturn plt.gcf()\n\[email protected]_image_compare(baseline_dir=BASELINE_DIR, tolerance = 20)\ndef test_basic_click_projection_kwarg():\n\timport cartopy.crs as ccrs\n\t\"\"\"\n\tTest init without fig\n\t\"\"\"\n\tfig = plt.figure()\n\tclick_map = survey.click_map(fig = fig, projection = ccrs.Mollweide())\n\treturn plt.gcf()"
]
| [
[
"numpy.random.seed",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.figure"
]
]
|
therooler/pennylane | [
"88a8a5960a2ffd218a12f85ace632021eef2abf5"
]
| [
"tests/devices/test_default_qubit_torch.py"
]
| [
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit tests and integration tests for the ``default.qubit.torch`` device.\n\"\"\"\nfrom itertools import product\n\nimport numpy as np\nimport pytest\nimport cmath\nimport math\nimport functools\n\npytestmark = pytest.mark.gpu\n\ntorch = pytest.importorskip(\"torch\", minversion=\"1.8.1\")\n\ntorch_devices = [None]\n\nif torch.cuda.is_available():\n torch_devices.append(\"cuda\")\n\n\nimport pennylane as qml\nfrom pennylane import numpy as pnp\nfrom pennylane import DeviceError\nfrom pennylane.wires import Wires\nfrom pennylane.devices.default_qubit_torch import DefaultQubitTorch\nfrom gate_data import (\n I,\n X,\n Y,\n Z,\n H,\n S,\n T,\n CNOT,\n CZ,\n SWAP,\n CNOT,\n Toffoli,\n CSWAP,\n Rphi,\n Rotx,\n Roty,\n Rotz,\n Rot3,\n CRotx,\n CRoty,\n CRotz,\n CRot3,\n IsingXX,\n IsingYY,\n IsingZZ,\n MultiRZ1,\n MultiRZ2,\n ControlledPhaseShift,\n SingleExcitation,\n SingleExcitationPlus,\n SingleExcitationMinus,\n DoubleExcitation,\n DoubleExcitationPlus,\n DoubleExcitationMinus,\n OrbitalRotation,\n)\n\nnp.random.seed(42)\n\n#####################################################\n# Test matrices\n#####################################################\n\nU = np.array(\n [\n [0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],\n [-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],\n ]\n)\n\nU2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)\n\n##################################\n# Define standard qubit operations\n##################################\n\n# Note: determining the torch device of the input parameters is done in the\n# test cases\n\nsingle_qubit = [\n (qml.S, S),\n (qml.T, T),\n (qml.PauliX, X),\n (qml.PauliY, Y),\n (qml.PauliZ, Z),\n (qml.Hadamard, H),\n]\n\nsingle_qubit_param = [\n (qml.PhaseShift, Rphi),\n (qml.RX, Rotx),\n (qml.RY, Roty),\n (qml.RZ, Rotz),\n (qml.MultiRZ, MultiRZ1),\n]\ntwo_qubit = [(qml.CZ, CZ), (qml.CNOT, CNOT), (qml.SWAP, SWAP)]\ntwo_qubit_param = [\n (qml.CRX, CRotx),\n (qml.CRY, CRoty),\n (qml.CRZ, CRotz),\n (qml.IsingXX, IsingXX),\n (qml.IsingYY, IsingYY),\n (qml.IsingZZ, IsingZZ),\n (qml.MultiRZ, MultiRZ2),\n (qml.ControlledPhaseShift, ControlledPhaseShift),\n (qml.SingleExcitation, SingleExcitation),\n (qml.SingleExcitationPlus, SingleExcitationPlus),\n (qml.SingleExcitationMinus, SingleExcitationMinus),\n]\nthree_qubit = [(qml.Toffoli, Toffoli), (qml.CSWAP, CSWAP)]\nfour_qubit_param = [\n (qml.DoubleExcitation, DoubleExcitation),\n (qml.DoubleExcitationPlus, DoubleExcitationPlus),\n (qml.DoubleExcitationMinus, DoubleExcitationMinus),\n (qml.OrbitalRotation, OrbitalRotation),\n]\n\n\n#####################################################\n# Fixtures\n#####################################################\n\n\[email protected]\ndef init_state(scope=\"session\"):\n \"\"\"Generates a random initial state\"\"\"\n\n def _init_state(n, torch_device):\n \"\"\"random initial state\"\"\"\n torch.manual_seed(42)\n state 
= torch.rand([2**n], dtype=torch.complex128) + torch.rand([2**n]) * 1j\n state /= torch.linalg.norm(state)\n return state.to(torch_device)\n\n return _init_state\n\n\[email protected]\ndef device(scope=\"function\"):\n \"\"\"Creates a Torch device\"\"\"\n\n def _dev(wires, torch_device=None):\n \"\"\"Torch device\"\"\"\n dev = DefaultQubitTorch(wires=wires, torch_device=torch_device)\n return dev\n\n return _dev\n\n\n#####################################################\n# Initialization test\n#####################################################\n\n\ndef test_analytic_deprecation():\n \"\"\"Tests if the kwarg `analytic` is used and displays error message.\"\"\"\n msg = \"The analytic argument has been replaced by shots=None. \"\n msg += \"Please use shots=None instead of analytic=True.\"\n\n with pytest.raises(DeviceError, match=msg):\n qml.device(\"default.qubit.torch\", wires=1, shots=1, analytic=True)\n\n\n#####################################################\n# Device-level integration tests\n#####################################################\n\n\[email protected](\"torch_device\", torch_devices)\nclass TestApply:\n \"\"\"Test application of PennyLane operations.\"\"\"\n\n def test_basis_state(self, device, torch_device, tol):\n \"\"\"Test basis state initialization\"\"\"\n\n dev = device(wires=4, torch_device=torch_device)\n state = torch.tensor([0, 0, 1, 0], dtype=torch.complex128, device=torch_device)\n\n dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])\n\n res = dev.state\n expected = torch.zeros([2**4], dtype=torch.complex128, device=torch_device)\n expected[2] = 1\n\n assert isinstance(res, torch.Tensor)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_invalid_basis_state_length(self, device, torch_device, tol):\n \"\"\"Test that an exception is raised if the basis state is the wrong size\"\"\"\n dev = device(wires=4, torch_device=torch_device)\n state = torch.tensor([0, 0, 1, 0])\n\n with pytest.raises(\n ValueError, match=r\"BasisState parameter and wires must be of equal length\"\n ):\n dev.apply([qml.BasisState(state, wires=[0, 1, 2])])\n\n def test_invalid_basis_state(self, device, torch_device, tol):\n \"\"\"Test that an exception is raised if the basis state is invalid\"\"\"\n dev = device(wires=4, torch_device=torch_device)\n state = torch.tensor([0, 0, 1, 2])\n\n with pytest.raises(\n ValueError, match=r\"BasisState parameter must consist of 0 or 1 integers\"\n ):\n dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])\n\n def test_qubit_state_vector(self, device, torch_device, init_state, tol):\n \"\"\"Test qubit state vector application\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n dev.apply([qml.QubitStateVector(state, wires=[0])])\n\n res = dev.state\n expected = state\n assert isinstance(res, torch.Tensor)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_full_subsystem_statevector(self, device, torch_device, mocker):\n \"\"\"Test applying a state vector to the full subsystem\"\"\"\n dev = device(wires=[\"a\", \"b\", \"c\"], torch_device=torch_device)\n state = (\n torch.tensor([1, 0, 0, 0, 1, 0, 1, 1], dtype=torch.complex128, device=torch_device)\n / 2.0\n )\n state_wires = qml.wires.Wires([\"a\", \"b\", \"c\"])\n\n spy = mocker.spy(dev, \"_scatter\")\n dev._apply_state_vector(state=state, device_wires=state_wires)\n\n assert torch.allclose(torch.reshape(dev._state, (-1,)), state)\n spy.assert_not_called()\n\n def 
test_partial_subsystem_statevector(self, device, torch_device, mocker):\n \"\"\"Test applying a state vector to a subset of wires of the full subsystem\"\"\"\n dev = device(wires=[\"a\", \"b\", \"c\"], torch_device=torch_device)\n state = torch.tensor(\n [1, 0, 1, 0], dtype=torch.complex128, device=torch_device\n ) / torch.tensor(math.sqrt(2.0))\n state_wires = qml.wires.Wires([\"a\", \"c\"])\n\n spy = mocker.spy(dev, \"_scatter\")\n dev._apply_state_vector(state=state, device_wires=state_wires)\n res = torch.reshape(torch.sum(dev._state, axis=(1,)), [-1])\n\n assert torch.allclose(res, state)\n spy.assert_called()\n\n def test_invalid_qubit_state_vector_size(self, device, torch_device):\n \"\"\"Test that an exception is raised if the state\n vector is the wrong size\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n state = torch.tensor([0, 1])\n\n with pytest.raises(ValueError, match=r\"State vector must be of length 2\\*\\*wires\"):\n dev.apply([qml.QubitStateVector(state, wires=[0, 1])])\n\n @pytest.mark.parametrize(\n \"state\", [torch.tensor([0, 12]), torch.tensor([1.0, -1.0], requires_grad=True)]\n )\n def test_invalid_qubit_state_vector_norm(self, device, torch_device, state):\n \"\"\"Test that an exception is raised if the state\n vector is not normalized\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n\n with pytest.raises(ValueError, match=r\"Sum of amplitudes-squared does not equal one\"):\n dev.apply([qml.QubitStateVector(state, wires=[0])])\n\n def test_invalid_state_prep(self, device, torch_device):\n \"\"\"Test that an exception is raised if a state preparation is not the\n first operation in the circuit.\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n state = torch.tensor([0, 12])\n\n with pytest.raises(\n qml.DeviceError,\n match=r\"cannot be used after other Operations have already been applied\",\n ):\n dev.apply([qml.PauliZ(0), qml.QubitStateVector(state, wires=[0])])\n\n @pytest.mark.parametrize(\"op,mat\", single_qubit)\n def test_single_qubit_no_parameters(self, device, torch_device, init_state, op, mat, tol):\n \"\"\"Test non-parametrized single qubit operations\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(wires=0)]\n dev.apply(queue)\n\n res = dev.state\n # assert mat.dtype == state.dtype\n mat = torch.tensor(mat, dtype=torch.complex128, device=torch_device)\n expected = torch.matmul(mat, state)\n assert isinstance(res, torch.Tensor)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\n def test_single_qubit_parameters(self, device, torch_device, init_state, op, func, theta, tol):\n \"\"\"Test parametrized single qubit operations\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n par = torch.tensor(theta, dtype=torch.complex128, device=torch_device)\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(par, wires=0)]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device)\n expected = torch.matmul(op_mat, state)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\n def test_single_qubit_parameters_inverse(\n self, device, 
torch_device, init_state, op, func, theta, tol\n ):\n \"\"\"Test parametrized single qubit operations\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n par = torch.tensor(theta, dtype=torch.complex128, device=torch_device)\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [op(par, wires=0).inv()]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device)\n op_mat = torch.transpose(torch.conj(op_mat), 0, 1)\n expected = torch.matmul(op_mat, state)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_rotation(self, device, torch_device, init_state, tol):\n \"\"\"Test three axis rotation gate\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n a = torch.tensor(0.542, dtype=torch.complex128, device=torch_device)\n b = torch.tensor(1.3432, dtype=torch.complex128, device=torch_device)\n c = torch.tensor(-0.654, dtype=torch.complex128, device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [qml.Rot(a, b, c, wires=0)]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(Rot3(a, b, c), dtype=torch.complex128, device=torch_device)\n expected = op_mat @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_controlled_rotation(self, device, torch_device, init_state, tol):\n \"\"\"Test three axis controlled-rotation gate\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n state = init_state(2, torch_device=torch_device)\n\n a = torch.tensor(0.542, dtype=torch.complex128, device=torch_device)\n b = torch.tensor(1.3432, dtype=torch.complex128, device=torch_device)\n c = torch.tensor(-0.654, dtype=torch.complex128, device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\n queue += [qml.CRot(a, b, c, wires=[0, 1])]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(CRot3(a, b, c), dtype=torch.complex128, device=torch_device)\n expected = op_mat @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_inverse_operation(self, device, torch_device, init_state, tol):\n \"\"\"Test that the inverse of an operation is correctly applied\"\"\"\n \"\"\"Test three axis rotation gate\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n a = torch.tensor(0.542, dtype=torch.complex128, device=torch_device)\n b = torch.tensor(1.3432, dtype=torch.complex128, device=torch_device)\n c = torch.tensor(-0.654, dtype=torch.complex128, device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0])]\n queue += [qml.Rot(a, b, c, wires=0).inv()]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(Rot3(a, b, c), dtype=torch.complex128, device=torch_device)\n expected = torch.linalg.inv(op_mat) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"op,mat\", two_qubit)\n def test_two_qubit_no_parameters(self, device, torch_device, init_state, op, mat, tol):\n \"\"\"Test non-parametrized two qubit operations\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n state = init_state(2, torch_device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\n queue += [op(wires=[0, 1])]\n dev.apply(queue)\n\n res = dev.state\n expected = torch.tensor(mat, dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n 
@pytest.mark.parametrize(\"mat\", [U, U2])\n def test_qubit_unitary(self, device, torch_device, init_state, mat, tol):\n \"\"\"Test application of arbitrary qubit unitaries\"\"\"\n N = int(math.log(len(mat), 2))\n\n mat = torch.tensor(mat, dtype=torch.complex128, device=torch_device)\n dev = device(wires=N, torch_device=torch_device)\n state = init_state(N, torch_device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=range(N))]\n queue += [qml.QubitUnitary(mat, wires=range(N))]\n dev.apply(queue)\n\n res = dev.state\n expected = mat @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_diagonal_qubit_unitary(self, device, torch_device, init_state, tol):\n \"\"\"Tests application of a diagonal qubit unitary\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n diag = torch.tensor(\n [-1.0 + 1j, 1.0 + 1j], requires_grad=True, dtype=torch.complex128, device=torch_device\n ) / math.sqrt(2)\n\n queue = [qml.QubitStateVector(state, wires=0), qml.DiagonalQubitUnitary(diag, wires=0)]\n dev.apply(queue)\n\n res = dev.state\n expected = torch.diag(diag) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_diagonal_qubit_unitary_inverse(self, device, torch_device, init_state, tol):\n \"\"\"Tests application of a diagonal qubit unitary\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n diag = torch.tensor(\n [-1.0 + 1j, 1.0 + 1j], requires_grad=True, dtype=torch.complex128, device=torch_device\n ) / math.sqrt(2)\n\n queue = [\n qml.QubitStateVector(state, wires=0),\n qml.DiagonalQubitUnitary(diag, wires=0).inv(),\n ]\n dev.apply(queue)\n\n res = dev.state\n expected = torch.diag(diag).conj() @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"op, mat\", three_qubit)\n def test_three_qubit_no_parameters(self, device, torch_device, init_state, op, mat, tol):\n \"\"\"Test non-parametrized three qubit operations\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n state = init_state(3, torch_device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0, 1, 2])]\n queue += [op(wires=[0, 1, 2])]\n dev.apply(queue)\n\n res = dev.state\n expected = torch.tensor(mat, dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", two_qubit_param)\n def test_two_qubit_parameters(self, device, torch_device, init_state, op, func, theta, tol):\n \"\"\"Test two qubit parametrized operations\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n state = init_state(2, torch_device=torch_device)\n\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\n queue += [op(theta, wires=[0, 1])]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device)\n expected = op_mat @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", four_qubit_param)\n def test_four_qubit_parameters(self, device, torch_device, init_state, op, func, theta, tol):\n \"\"\"Test two qubit parametrized operations\"\"\"\n dev = device(wires=4, torch_device=torch_device)\n state = init_state(4, torch_device=torch_device)\n\n par = torch.tensor(theta, device=torch_device)\n queue = [qml.QubitStateVector(state, 
wires=[0, 1, 2, 3])]\n queue += [op(par, wires=[0, 1, 2, 3])]\n dev.apply(queue)\n\n res = dev.state\n op_mat = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device)\n expected = op_mat @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_apply_ops_above_8_wires_using_special(self, device, torch_device):\n \"\"\"Test that special apply methods that involve slicing function correctly when using 9\n wires\"\"\"\n dev = device(wires=9, torch_device=torch_device)\n dev._apply_ops = {\"CNOT\": dev._apply_cnot}\n\n queue = [qml.CNOT(wires=[1, 2])]\n dev.apply(queue)\n\n\nTHETA = torch.linspace(0.11, 1, 3, dtype=torch.float64)\nPHI = torch.linspace(0.32, 1, 3, dtype=torch.float64)\nVARPHI = torch.linspace(0.02, 1, 3, dtype=torch.float64)\n\n\[email protected](\"torch_device\", torch_devices)\[email protected](\"theta, phi, varphi\", list(zip(THETA, PHI, VARPHI)))\nclass TestExpval:\n \"\"\"Test expectation values\"\"\"\n\n # test data; each tuple is of the form (GATE, OBSERVABLE, EXPECTED)\n single_wire_expval_test_data = [\n (\n qml.RX,\n qml.Identity,\n lambda t, p, t_device: torch.tensor([1.0, 1.0], dtype=torch.float64, device=t_device),\n ),\n (\n qml.RX,\n qml.PauliZ,\n lambda t, p, t_device: torch.tensor(\n [torch.cos(t), torch.cos(t) * torch.cos(p)], dtype=torch.float64, device=t_device\n ),\n ),\n (\n qml.RY,\n qml.PauliX,\n lambda t, p, t_device: torch.tensor(\n [torch.sin(t) * torch.sin(p), torch.sin(p)], dtype=torch.float64, device=t_device\n ),\n ),\n (\n qml.RX,\n qml.PauliY,\n lambda t, p, t_device: torch.tensor(\n [0, -torch.cos(t) * torch.sin(p)], dtype=torch.float64, device=t_device\n ),\n ),\n (\n qml.RY,\n qml.Hadamard,\n lambda t, p, t_device: torch.tensor(\n [\n torch.sin(t) * torch.sin(p) + torch.cos(t),\n torch.cos(t) * torch.cos(p) + torch.sin(p),\n ],\n dtype=torch.float64,\n device=t_device,\n )\n / math.sqrt(2),\n ),\n ]\n\n @pytest.mark.parametrize(\"gate,obs,expected\", single_wire_expval_test_data)\n def test_single_wire_expectation(\n self, device, torch_device, gate, obs, expected, theta, phi, varphi, tol\n ):\n \"\"\"Test that single qubit gates with single qubit expectation values\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n\n par1 = theta.to(device=torch_device)\n par2 = phi.to(device=torch_device)\n with qml.tape.QuantumTape() as tape:\n queue = [gate(par1, wires=0), gate(par2, wires=1), qml.CNOT(wires=[0, 1])]\n observables = [qml.expval(obs(wires=[i])) for i in range(2)]\n\n res = dev.execute(tape)\n\n expected_res = expected(theta, phi, torch_device)\n assert torch.allclose(res, expected_res, atol=tol, rtol=0)\n\n def test_hermitian_expectation(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that arbitrary Hermitian expectation values are correct\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n\n Hermitian_mat = torch.tensor(\n [[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]],\n dtype=torch.complex128,\n device=torch_device,\n )\n\n par1 = theta.to(device=torch_device)\n par2 = phi.to(device=torch_device)\n with qml.tape.QuantumTape() as tape:\n queue = [qml.RY(par1, wires=0), qml.RY(par2, wires=1), qml.CNOT(wires=[0, 1])]\n observables = [qml.expval(qml.Hermitian(Hermitian_mat, wires=[i])) for i in range(2)]\n\n res = dev.execute(tape)\n\n a = Hermitian_mat[0, 0]\n re_b = Hermitian_mat[0, 1].real\n d = Hermitian_mat[1, 1]\n ev1 = (\n (a - d) * torch.cos(theta) + 2 * re_b * torch.sin(theta) * torch.sin(phi) + a + d\n ) / 2\n ev2 = ((a - d) 
* torch.cos(theta) * torch.cos(phi) + 2 * re_b * torch.sin(phi) + a + d) / 2\n expected = torch.tensor([ev1, ev2], dtype=torch.float64, device=torch_device)\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_multi_mode_hermitian_expectation(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that arbitrary multi-mode Hermitian expectation values are correct\"\"\"\n Hermit_mat2 = torch.tensor(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ],\n dtype=torch.complex128,\n )\n\n dev = device(wires=2, torch_device=torch_device)\n\n par1 = theta.to(device=torch_device)\n par2 = phi.to(device=torch_device)\n with qml.tape.QuantumTape() as tape:\n queue = [qml.RY(par1, wires=0), qml.RY(par2, wires=1), qml.CNOT(wires=[0, 1])]\n observables = [qml.expval(qml.Hermitian(Hermit_mat2, wires=[0, 1]))]\n\n res = dev.execute(tape)\n\n # below is the analytic expectation value for this circuit with arbitrary\n # Hermitian observable Hermit_mat2\n expected = 0.5 * (\n 6 * torch.cos(theta) * torch.sin(phi)\n - torch.sin(theta) * (8 * torch.sin(phi) + 7 * torch.cos(phi) + 3)\n - 2 * torch.sin(phi)\n - 6 * torch.cos(phi)\n - 6\n )\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_paulix_pauliy(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving PauliX and PauliY works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n dev.reset()\n\n obs = qml.PauliX(0) @ qml.PauliY(2)\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n expected = torch.sin(theta) * torch.sin(phi) * torch.sin(varphi)\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_pauliz_identity(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving PauliZ and Identity works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n dev.reset()\n\n obs = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n expected = torch.cos(varphi) * torch.cos(phi)\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_pauliz_hadamard(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving PauliZ and PauliY and hadamard works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)\n\n dev.reset()\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n expected = -(\n torch.cos(varphi) * torch.sin(phi) + torch.sin(varphi) * torch.cos(theta)\n ) / math.sqrt(2)\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_hermitian(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving qml.Hermitian works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n dev.reset()\n\n Hermit_mat3 = torch.tensor(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 
4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ],\n dtype=torch.complex128,\n )\n\n obs = qml.PauliZ(0) @ qml.Hermitian(Hermit_mat3, wires=[1, 2])\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n expected = 0.5 * (\n -6 * torch.cos(theta) * (torch.cos(varphi) + 1)\n - 2 * torch.sin(varphi) * (torch.cos(theta) + torch.sin(phi) - 2 * torch.cos(phi))\n + 3 * torch.cos(varphi) * torch.sin(phi)\n + torch.sin(phi)\n )\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_hermitian_hermitian(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving two Hermitian matrices works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n\n A1 = torch.tensor([[1, 2], [2, 4]], dtype=torch.complex128)\n\n A2 = torch.tensor(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ],\n dtype=torch.complex128,\n )\n\n obs = qml.Hermitian(A1, wires=[0]) @ qml.Hermitian(A2, wires=[1, 2])\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n expected = 0.25 * (\n -30\n + 4 * torch.cos(phi) * torch.sin(theta)\n + 3\n * torch.cos(varphi)\n * (-10 + 4 * torch.cos(phi) * torch.sin(theta) - 3 * torch.sin(phi))\n - 3 * torch.sin(phi)\n - 2\n * (\n 5\n + torch.cos(phi) * (6 + 4 * torch.sin(theta))\n + (-3 + 8 * torch.sin(theta)) * torch.sin(phi)\n )\n * torch.sin(varphi)\n + torch.cos(theta)\n * (\n 18\n + 5 * torch.sin(phi)\n + 3 * torch.cos(varphi) * (6 + 5 * torch.sin(phi))\n + 2 * (3 + 10 * torch.cos(phi) - 5 * torch.sin(phi)) * torch.sin(varphi)\n )\n )\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_hermitian_identity_expectation(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving an Hermitian matrix and the identity works correctly\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n\n A = torch.tensor(\n [[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]],\n dtype=torch.complex128,\n )\n\n obs = qml.Hermitian(A, wires=[0]) @ qml.Identity(wires=[1])\n\n dev.apply(\n [qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],\n obs.diagonalizing_gates(),\n )\n\n res = dev.expval(obs)\n\n a = A[0, 0]\n re_b = A[0, 1].real\n d = A[1, 1]\n expected = (\n (a - d) * torch.cos(theta) + 2 * re_b * torch.sin(theta) * torch.sin(phi) + a + d\n ) / 2\n\n assert torch.allclose(res, torch.real(expected), atol=tol, rtol=0)\n\n def test_hermitian_two_wires_identity_expectation(\n self, device, torch_device, theta, phi, varphi, tol\n ):\n \"\"\"Test that a tensor product involving an Hermitian matrix for two wires and the identity works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n\n A = torch.tensor(\n [[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]],\n dtype=torch.complex128,\n )\n Identity = torch.tensor([[1, 0], [0, 1]])\n H = torch.kron(torch.kron(Identity, Identity), A)\n obs = qml.Hermitian(H, wires=[2, 1, 0])\n\n dev.apply(\n [qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],\n obs.diagonalizing_gates(),\n )\n res = 
dev.expval(obs)\n\n a = A[0, 0]\n re_b = A[0, 1].real\n d = A[1, 1]\n\n expected = (\n (a - d) * torch.cos(theta) + 2 * re_b * torch.sin(theta) * torch.sin(phi) + a + d\n ) / 2\n assert torch.allclose(res, torch.real(expected), atol=tol, rtol=0)\n\n\[email protected](\"torch_device\", torch_devices)\[email protected](\"theta, phi, varphi\", list(zip(THETA, PHI, VARPHI)))\nclass TestVar:\n \"\"\"Tests for the variance\n\n Note: the following tests use DefaultQubitTorch.execute that contains logic\n to transfer tensors created by default on the CPU to the GPU. Therefore, gate\n parameters do not have to explicitly be put on the GPU, it suffices to\n specify torch_device='cuda' when creating the PennyLane device.\n \"\"\"\n\n def test_var(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Tests for variance calculation\"\"\"\n dev = device(wires=1, torch_device=torch_device)\n\n par1 = theta.to(device=torch_device)\n par2 = phi.to(device=torch_device)\n\n # test correct variance for <Z> of a rotated state\n with qml.tape.QuantumTape() as tape:\n queue = [qml.RX(par1, wires=0), qml.RY(par2, wires=0)]\n observables = [qml.var(qml.PauliZ(wires=[0]))]\n\n res = dev.execute(tape)\n expected = 0.25 * (\n 3 - torch.cos(2 * theta) - 2 * torch.cos(theta) ** 2 * torch.cos(2 * phi)\n )\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_var_hermitian(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Tests for variance calculation using an arbitrary Hermitian observable\"\"\"\n dev = device(wires=2, torch_device=torch_device)\n\n theta = theta.to(device=torch_device)\n phi = phi.to(device=torch_device)\n\n # test correct variance for <H> of a rotated state\n H = torch.tensor([[4, -1 + 6j], [-1 - 6j, 2]], dtype=torch.complex128, device=torch_device)\n\n with qml.tape.QuantumTape() as tape:\n queue = [qml.RX(phi, wires=0), qml.RY(theta, wires=0)]\n observables = [qml.var(qml.Hermitian(H, wires=[0]))]\n\n res = dev.execute(tape)\n expected = 0.5 * (\n 2 * torch.sin(2 * theta) * torch.cos(phi) ** 2\n + 24 * torch.sin(phi) * torch.cos(phi) * (torch.sin(theta) - torch.cos(theta))\n + 35 * torch.cos(2 * phi)\n + 39\n )\n expected = expected.to(device=torch_device)\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_paulix_pauliy(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving PauliX and PauliY works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n\n theta = theta.to(device=torch_device)\n phi = phi.to(device=torch_device)\n varphi = varphi.to(device=torch_device)\n\n obs = qml.PauliX(0) @ qml.PauliY(2)\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.var(obs)\n\n expected = (\n 8 * torch.sin(theta) ** 2 * torch.cos(2 * varphi) * torch.sin(phi) ** 2\n - torch.cos(2 * (theta - phi))\n - torch.cos(2 * (theta + phi))\n + 2 * torch.cos(2 * theta)\n + 2 * torch.cos(2 * phi)\n + 14\n ) / 16\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_pauliz_hadamard(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving PauliZ and PauliY and hadamard works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)\n\n theta = theta.to(device=torch_device)\n phi = phi.to(device=torch_device)\n varphi = 
varphi.to(device=torch_device)\n\n dev.reset()\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.var(obs)\n\n expected = (\n 3\n + torch.cos(2 * phi) * torch.cos(varphi) ** 2\n - torch.cos(2 * theta) * torch.sin(varphi) ** 2\n - 2 * torch.cos(theta) * torch.sin(phi) * torch.sin(2 * varphi)\n ) / 4\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_hermitian(self, device, torch_device, theta, phi, varphi, tol):\n \"\"\"Test that a tensor product involving qml.Hermitian works correctly\"\"\"\n dev = device(wires=3, torch_device=torch_device)\n\n theta = theta.to(device=torch_device)\n phi = phi.to(device=torch_device)\n varphi = varphi.to(device=torch_device)\n\n A = torch.tensor(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ],\n dtype=torch.complex128,\n device=torch_device,\n )\n\n obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])\n\n dev.apply(\n [\n qml.RX(theta, wires=[0]),\n qml.RX(phi, wires=[1]),\n qml.RX(varphi, wires=[2]),\n qml.CNOT(wires=[0, 1]),\n qml.CNOT(wires=[1, 2]),\n ],\n obs.diagonalizing_gates(),\n )\n\n res = dev.var(obs)\n\n expected = (\n 1057\n - torch.cos(2 * phi)\n + 12 * (27 + torch.cos(2 * phi)) * torch.cos(varphi)\n - 2\n * torch.cos(2 * varphi)\n * torch.sin(phi)\n * (16 * torch.cos(phi) + 21 * torch.sin(phi))\n + 16 * torch.sin(2 * phi)\n - 8 * (-17 + torch.cos(2 * phi) + 2 * torch.sin(2 * phi)) * torch.sin(varphi)\n - 8 * torch.cos(2 * theta) * (3 + 3 * torch.cos(varphi) + torch.sin(varphi)) ** 2\n - 24 * torch.cos(phi) * (torch.cos(phi) + 2 * torch.sin(phi)) * torch.sin(2 * varphi)\n - 8\n * torch.cos(theta)\n * (\n 4\n * torch.cos(phi)\n * (\n 4\n + 8 * torch.cos(varphi)\n + torch.cos(2 * varphi)\n - (1 + 6 * torch.cos(varphi)) * torch.sin(varphi)\n )\n + torch.sin(phi)\n * (\n 15\n + 8 * torch.cos(varphi)\n - 11 * torch.cos(2 * varphi)\n + 42 * torch.sin(varphi)\n + 3 * torch.sin(2 * varphi)\n )\n )\n ) / 16\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n\n#####################################################\n# QNode-level integration tests\n#####################################################\n\n\[email protected](\"torch_device\", torch_devices)\nclass TestQNodeIntegration:\n \"\"\"Integration tests for default.qubit.torch. 
This test ensures it integrates\n properly with the PennyLane UI, in particular the new QNode.\"\"\"\n\n def test_defines_correct_capabilities(self, torch_device):\n \"\"\"Test that the device defines the right capabilities\"\"\"\n\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n cap = dev.capabilities()\n capabilities = {\n \"model\": \"qubit\",\n \"supports_finite_shots\": True,\n \"supports_tensor_observables\": True,\n \"returns_probs\": True,\n \"returns_state\": True,\n \"supports_reversible_diff\": False,\n \"supports_inverse_operations\": True,\n \"supports_analytic_computation\": True,\n \"passthru_interface\": \"torch\",\n \"passthru_devices\": {\n \"torch\": \"default.qubit.torch\",\n \"tf\": \"default.qubit.tf\",\n \"autograd\": \"default.qubit.autograd\",\n \"jax\": \"default.qubit.jax\",\n },\n }\n assert cap == capabilities\n\n def test_load_torch_device(self, torch_device):\n \"\"\"Test that the torch device plugin loads correctly\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n assert dev.num_wires == 2\n assert dev.shots is None\n assert dev.short_name == \"default.qubit.torch\"\n assert dev.capabilities()[\"passthru_interface\"] == \"torch\"\n assert dev._torch_device == torch_device\n\n def test_qubit_circuit(self, device, torch_device, tol):\n \"\"\"Test that the torch device provides correct\n result for a simple circuit using the old QNode.\"\"\"\n p = torch.tensor(0.543, dtype=torch.float64, device=torch_device)\n\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\")\n def circuit(x):\n qml.RX(x, wires=0)\n return qml.expval(qml.PauliY(0))\n\n expected = -torch.sin(p)\n\n assert circuit.gradient_fn == \"backprop\"\n assert torch.allclose(circuit(p), expected, atol=tol, rtol=0)\n\n def test_correct_state(self, device, torch_device, tol):\n \"\"\"Test that the device state is correct after applying a\n quantum function on the device\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n\n state = dev.state\n expected = torch.tensor([1, 0, 0, 0], dtype=torch.complex128, device=torch_device)\n assert torch.allclose(state, expected, atol=tol, rtol=0)\n\n input_param = torch.tensor(math.pi / 4, device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n def circuit():\n qml.Hadamard(wires=0)\n qml.RZ(input_param, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev.state\n\n amplitude = cmath.exp(-1j * cmath.pi / 8) / cmath.sqrt(2)\n\n expected = torch.tensor(\n [amplitude, 0, amplitude.conjugate(), 0], dtype=torch.complex128, device=torch_device\n )\n assert torch.allclose(state, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\n def test_one_qubit_param_gates(self, torch_device, theta, op, func, init_state, tol):\n \"\"\"Test the integration of the one-qubit single parameter rotations by passing\n a Torch data structure as a parameter\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n state = init_state(1, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\")\n def circuit(params):\n qml.QubitStateVector(state, wires=[0])\n op(params[0], wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n params = torch.tensor([theta])\n circuit(params)\n res = dev.state\n expected = torch.tensor(func(theta), 
dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, 4.213])\n @pytest.mark.parametrize(\"op,func\", two_qubit_param)\n def test_two_qubit_param_gates(self, torch_device, theta, op, func, init_state, tol):\n \"\"\"Test the integration of the two-qubit single parameter rotations by passing\n a Torch data structure as a parameter\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n state = init_state(2, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\")\n def circuit(params):\n qml.QubitStateVector(state, wires=[0, 1])\n op(params[0], wires=[0, 1])\n return qml.expval(qml.PauliZ(0))\n\n # Pass a Torch Variable to the qfunc\n params = torch.tensor([theta], device=torch_device)\n circuit(params)\n res = dev.state\n expected = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"theta\", [0.5432, 4.213])\n @pytest.mark.parametrize(\"op,func\", four_qubit_param)\n def test_four_qubit_param_gates(self, torch_device, theta, op, func, init_state, tol):\n \"\"\"Test the integration of the four-qubit single parameter rotations by passing\n a Torch data structure as a parameter\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=4, torch_device=torch_device)\n state = init_state(4, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\")\n def circuit(params):\n qml.QubitStateVector(state, wires=[0, 1, 2, 3])\n op(params[0], wires=[0, 1, 2, 3])\n return qml.expval(qml.PauliZ(0))\n\n # Pass a Torch Variable to the qfunc\n params = torch.tensor([theta], device=torch_device)\n circuit(params)\n res = dev.state\n expected = torch.tensor(func(theta), dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_controlled_rotation_integration(self, torch_device, init_state, tol):\n \"\"\"Test the integration of the two-qubit controlled rotation by passing\n a Torch data structure as a parameter\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n\n a = torch.tensor(1.7, device=torch_device)\n b = torch.tensor(1.3432, device=torch_device)\n c = torch.tensor(-0.654, device=torch_device)\n state = init_state(2, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\")\n def circuit(params):\n qml.QubitStateVector(state, wires=[0, 1])\n qml.CRot(params[0], params[1], params[2], wires=[0, 1])\n return qml.expval(qml.PauliZ(0))\n\n # Pass a Torch Variable to the qfunc\n params = torch.tensor([a, b, c], device=torch_device)\n circuit(params)\n res = dev.state\n expected = torch.tensor(CRot3(a, b, c), dtype=torch.complex128, device=torch_device) @ state\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"torch_device\", torch_devices)\nclass TestPassthruIntegration:\n \"\"\"Tests for integration with the PassthruQNode\"\"\"\n\n def test_jacobian_variable_multiply(self, device, torch_device, tol):\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.torch device\n gives the correct result in the case of parameters multiplied by scalars\"\"\"\n x = torch.tensor(0.43316321, dtype=torch.float64, requires_grad=True, device=torch_device)\n y = torch.tensor(0.43316321, dtype=torch.float64, requires_grad=True, device=torch_device)\n z = torch.tensor(0.43316321, dtype=torch.float64, 
requires_grad=True, device=torch_device)\n\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n def circuit(p):\n qml.RX(3 * p[0], wires=0)\n qml.RY(p[1], wires=0)\n qml.RX(p[2] / 2, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n res = circuit([x, y, z])\n res.backward()\n\n expected = torch.cos(3 * x) * torch.cos(y) * torch.cos(z / 2) - torch.sin(\n 3 * x\n ) * torch.sin(z / 2)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n x_grad = -3 * (\n torch.sin(3 * x) * torch.cos(y) * torch.cos(z / 2) + torch.cos(3 * x) * torch.sin(z / 2)\n )\n y_grad = -torch.cos(3 * x) * torch.sin(y) * torch.cos(z / 2)\n z_grad = -0.5 * (\n torch.sin(3 * x) * torch.cos(z / 2) + torch.cos(3 * x) * torch.cos(y) * torch.sin(z / 2)\n )\n\n assert torch.allclose(x.grad, x_grad)\n assert torch.allclose(y.grad, y_grad)\n assert torch.allclose(z.grad, z_grad)\n\n def test_jacobian_repeated(self, device, torch_device, tol):\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.torch device\n gives the correct result in the case of repeated parameters\"\"\"\n x = torch.tensor(0.43316321, dtype=torch.float64, requires_grad=True, device=torch_device)\n y = torch.tensor(0.2162158, dtype=torch.float64, requires_grad=True, device=torch_device)\n z = torch.tensor(0.75110998, dtype=torch.float64, requires_grad=True, device=torch_device)\n p = torch.tensor([x, y, z], requires_grad=True, device=torch_device)\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n def circuit(x):\n qml.RX(x[1], wires=0)\n qml.Rot(x[0], x[1], x[2], wires=0)\n return qml.expval(qml.PauliZ(0))\n\n res = circuit(p)\n res.backward()\n\n expected = torch.cos(y) ** 2 - torch.sin(x) * torch.sin(y) ** 2\n\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n expected_grad = torch.tensor(\n [\n -torch.cos(x) * torch.sin(y) ** 2,\n -2 * (torch.sin(x) + 1) * torch.sin(y) * torch.cos(y),\n 0,\n ],\n dtype=torch.float64,\n device=torch_device,\n )\n assert torch.allclose(p.grad, expected_grad, atol=tol, rtol=0)\n\n def test_jacobian_agrees_backprop_parameter_shift(self, device, torch_device, tol):\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.torch device\n gives the correct result with respect to the parameter-shift method\"\"\"\n p = pnp.array([0.43316321, 0.2162158, 0.75110998, 0.94714242], requires_grad=True)\n\n def circuit(x):\n for i in range(0, len(p), 2):\n qml.RX(x[i], wires=0)\n qml.RY(x[i + 1], wires=1)\n for i in range(2):\n qml.CNOT(wires=[i, i + 1])\n return qml.expval(qml.PauliZ(0)) # , qml.var(qml.PauliZ(1))\n\n dev1 = qml.device(\"default.qubit.torch\", wires=3, torch_device=torch_device)\n dev2 = qml.device(\"default.qubit\", wires=3)\n\n circuit1 = qml.QNode(circuit, dev1, diff_method=\"backprop\", interface=\"torch\")\n circuit2 = qml.QNode(circuit, dev2, diff_method=\"parameter-shift\")\n\n p_torch = torch.tensor(p, requires_grad=True, device=torch_device)\n res = circuit1(p_torch)\n res.backward()\n\n assert qml.math.allclose(res, circuit2(p), atol=tol, rtol=0)\n\n p_grad = p_torch.grad\n assert qml.math.allclose(p_grad, qml.jacobian(circuit2)(p), atol=tol, rtol=0)\n\n def test_state_differentiability(self, device, torch_device, tol):\n \"\"\"Test that the device state can be differentiated\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n @qml.qnode(dev, 
diff_method=\"backprop\", interface=\"torch\")\n def circuit(a):\n qml.RY(a, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n a = torch.tensor(0.54, requires_grad=True, device=torch_device)\n\n circuit(a)\n res = torch.abs(dev.state) ** 2\n res = res[1] - res[0]\n res.backward()\n\n grad = a.grad\n expected = torch.sin(a)\n assert torch.allclose(grad, expected, atol=tol, rtol=0)\n\n def test_prob_differentiability(self, device, torch_device, tol):\n \"\"\"Test that the device probability can be differentiated\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"torch\")\n def circuit(a, b):\n qml.RX(a, wires=0)\n qml.RY(b, wires=1)\n qml.CNOT(wires=[0, 1])\n return qml.probs(wires=[1])\n\n a = torch.tensor(0.54, requires_grad=True, dtype=torch.float64, device=torch_device)\n b = torch.tensor(0.12, requires_grad=True, dtype=torch.float64, device=torch_device)\n\n # get the probability of wire 1\n prob_wire_1 = circuit(a, b)\n # compute Prob(|1>_1) - Prob(|0>_1)\n res = prob_wire_1[1] - prob_wire_1[0]\n res.backward()\n\n expected = -torch.cos(a) * torch.cos(b)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n assert torch.allclose(a.grad, torch.sin(a) * torch.cos(b), atol=tol, rtol=0)\n assert torch.allclose(b.grad, torch.cos(a) * torch.sin(b), atol=tol, rtol=0)\n\n def test_backprop_gradient(self, device, torch_device, tol):\n \"\"\"Tests that the gradient of the qnode is correct\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"torch\")\n def circuit(a, b):\n qml.RX(a, wires=0)\n qml.CRX(b, wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n a = torch.tensor(-0.234, dtype=torch.float64, requires_grad=True, device=torch_device)\n b = torch.tensor(0.654, dtype=torch.float64, requires_grad=True, device=torch_device)\n\n res = circuit(a, b)\n res.backward()\n\n # the analytic result of evaluating circuit(a, b)\n expected_cost = 0.5 * (torch.cos(a) * torch.cos(b) + torch.cos(a) - torch.cos(b) + 1)\n\n assert torch.allclose(res, expected_cost, atol=tol, rtol=0)\n\n assert torch.allclose(a.grad, -0.5 * torch.sin(a) * (torch.cos(b) + 1), atol=tol, rtol=0)\n assert torch.allclose(b.grad, 0.5 * torch.sin(b) * (1 - torch.cos(a)))\n\n @pytest.mark.parametrize(\"x, shift\", [(0.0, 0.0), (0.5, -0.5)])\n def test_hessian_at_zero(self, torch_device, x, shift):\n \"\"\"Tests that the Hessian at vanishing state vector amplitudes\n is correct.\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n x = torch.tensor(x, requires_grad=True)\n\n @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n def circuit(x):\n qml.RY(shift, wires=0)\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n grad = torch.autograd.functional.jacobian(circuit, x)\n hess = torch.autograd.functional.hessian(circuit, x)\n\n assert qml.math.isclose(grad, torch.tensor(0.0))\n assert qml.math.isclose(hess, torch.tensor(-1.0))\n\n @pytest.mark.parametrize(\"operation\", [qml.U3, qml.U3.compute_decomposition])\n @pytest.mark.parametrize(\"diff_method\", [\"backprop\", \"parameter-shift\", \"finite-diff\"])\n def test_torch_interface_gradient(self, torch_device, operation, diff_method, tol):\n \"\"\"Tests that the gradient of an arbitrary U3 gate is correct\n using the PyTorch interface, using a variety of differentiation methods.\"\"\"\n dev = qml.device(\"default.qubit.torch\", 
wires=1, torch_device=torch_device)\n\n input_state = torch.tensor(1j * np.array([1, -1]) / math.sqrt(2), device=torch_device)\n\n @qml.qnode(dev, diff_method=diff_method, interface=\"torch\")\n def circuit(x, weights, w):\n \"\"\"In this example, a mixture of scalar\n arguments, array arguments, and keyword arguments are used.\"\"\"\n qml.QubitStateVector(input_state, wires=w)\n operation(x, weights[0], weights[1], wires=w)\n return qml.expval(qml.PauliX(w))\n\n # Check that the correct QNode type is being used.\n if diff_method == \"backprop\":\n assert circuit.gradient_fn == \"backprop\"\n elif diff_method == \"finite-diff\":\n assert circuit.gradient_fn is qml.gradients.finite_diff\n\n def cost(params):\n \"\"\"Perform some classical processing\"\"\"\n return circuit(params[0], params[1:], w=0) ** 2\n\n theta = torch.tensor(0.543, dtype=torch.float64, device=torch_device)\n phi = torch.tensor(-0.234, dtype=torch.float64, device=torch_device)\n lam = torch.tensor(0.654, dtype=torch.float64, device=torch_device)\n\n params = torch.tensor(\n [theta, phi, lam], dtype=torch.float64, requires_grad=True, device=torch_device\n )\n\n res = cost(params)\n res.backward()\n\n # check that the result is correct\n expected_cost = (\n torch.sin(lam) * torch.sin(phi) - torch.cos(theta) * torch.cos(lam) * torch.cos(phi)\n ) ** 2\n assert torch.allclose(res, expected_cost, atol=tol, rtol=0)\n\n # check that the gradient is correct\n expected_grad = (\n torch.tensor(\n [\n torch.sin(theta) * torch.cos(lam) * torch.cos(phi),\n torch.cos(theta) * torch.cos(lam) * torch.sin(phi)\n + torch.sin(lam) * torch.cos(phi),\n torch.cos(theta) * torch.sin(lam) * torch.cos(phi)\n + torch.cos(lam) * torch.sin(phi),\n ],\n device=torch_device,\n )\n * 2\n * (torch.sin(lam) * torch.sin(phi) - torch.cos(theta) * torch.cos(lam) * torch.cos(phi))\n )\n assert torch.allclose(params.grad, expected_grad, atol=tol, rtol=0)\n\n def test_inverse_operation_jacobian_backprop(self, device, torch_device, tol):\n \"\"\"Test that inverse operations work in backprop\n mode\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1)\n\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"torch\")\n def circuit(param):\n qml.RY(param, wires=0).inv()\n return qml.expval(qml.PauliX(0))\n\n x = torch.tensor(0.3, requires_grad=True, dtype=torch.float64)\n\n res = circuit(x)\n res.backward()\n\n assert torch.allclose(res, -torch.sin(x), atol=tol, rtol=0)\n\n grad = x.grad\n assert torch.allclose(grad, -torch.cos(x), atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"interface\", [\"autograd\", \"torch\"])\n def test_error_backprop_wrong_interface(self, torch_device, interface, tol):\n \"\"\"Tests that an error is raised if diff_method='backprop' but not using\n the torch interface\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1, torch_device=torch_device)\n\n def circuit(x, w=None):\n qml.RZ(x, wires=w)\n return qml.expval(qml.PauliX(w))\n\n with pytest.raises(Exception) as e:\n assert qml.qnode(dev, diff_method=\"autograd\", interface=interface)(circuit)\n assert (\n str(e.value)\n == \"Differentiation method autograd not recognized. 
Allowed options are ('best', 'parameter-shift', 'backprop', 'finite-diff', 'device', 'adjoint').\"\n )\n\n\[email protected](\"torch_device\", torch_devices)\nclass TestSamples:\n \"\"\"Tests for sampling outputs\"\"\"\n\n def test_sample_observables(self, torch_device):\n \"\"\"Test that the device allows for sampling from observables.\"\"\"\n shots = 100\n dev = qml.device(\"default.qubit.torch\", wires=2, shots=shots, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=None, interface=\"torch\")\n def circuit(a):\n qml.RX(a, wires=0)\n return qml.sample(qml.PauliZ(0))\n\n a = torch.tensor(0.54, dtype=torch.float64, device=torch_device)\n res = circuit(a)\n\n assert torch.is_tensor(res)\n assert res.shape == (shots,)\n assert torch.allclose(\n torch.unique(res), torch.tensor([-1, 1], dtype=torch.int64, device=torch_device)\n )\n\n def test_estimating_marginal_probability(self, device, torch_device, tol):\n \"\"\"Test that the probability of a subset of wires is accurately estimated.\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, shots=1000, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=None, interface=\"torch\")\n def circuit():\n qml.PauliX(0)\n return qml.probs(wires=[0])\n\n res = circuit()\n\n assert torch.is_tensor(res)\n\n expected = torch.tensor([0, 1], dtype=torch.float64, device=torch_device)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_estimating_full_probability(self, device, torch_device, tol):\n \"\"\"Test that the probability of a subset of wires is accurately estimated.\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, shots=1000, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=None, interface=\"torch\")\n def circuit():\n qml.PauliX(0)\n qml.PauliX(1)\n return qml.probs(wires=[0, 1])\n\n res = circuit()\n\n assert torch.is_tensor(res)\n\n expected = torch.tensor([0, 0, 0, 1], dtype=torch.float64, device=torch_device)\n assert torch.allclose(res, expected, atol=tol, rtol=0)\n\n def test_estimating_expectation_values(self, device, torch_device, tol):\n \"\"\"Test that estimating expectation values using a finite number\n of shots produces a numeric tensor\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=3, shots=1000, torch_device=torch_device)\n\n @qml.qnode(dev, diff_method=None, interface=\"torch\")\n def circuit(a, b):\n qml.RX(a, wires=[0])\n qml.RX(b, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n a = torch.tensor(0.543, dtype=torch.float64, device=torch_device)\n b = torch.tensor(0.43, dtype=torch.float64, device=torch_device)\n\n res = circuit(a, b)\n assert torch.is_tensor(res)\n\n # We don't check the expected value due to stochasticity, but\n # leave it here for completeness.\n # expected = [torch.cos(a), torch.cos(a) * torch.cos(b)]\n # assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"torch_device\", torch_devices)\nclass TestHighLevelIntegration:\n \"\"\"Tests for integration with higher level components of PennyLane.\"\"\"\n\n def test_qnode_collection_integration(self, torch_device):\n \"\"\"Test that a PassthruQNode default.qubit.torch works with QNodeCollections.\"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=2, torch_device=torch_device)\n\n obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]\n qnodes = qml.map(qml.templates.StronglyEntanglingLayers, obs_list, dev, interface=\"torch\")\n\n assert qnodes.interface == \"torch\"\n\n 
torch.manual_seed(42)\n weights = torch.rand(\n qml.templates.StronglyEntanglingLayers.shape(n_wires=2, n_layers=2),\n requires_grad=True,\n device=torch_device,\n )\n\n def cost(weights):\n return torch.sum(qnodes(weights))\n\n res = cost(weights)\n res.backward()\n\n grad = weights.grad\n\n assert torch.is_tensor(res)\n assert grad.shape == weights.shape\n\n def test_sampling_analytic_mode(self, torch_device):\n \"\"\"Test that when sampling with shots=None, dev uses 1000 shots and\n raises an error.\n \"\"\"\n dev = qml.device(\"default.qubit.torch\", wires=1, shots=None, torch_device=torch_device)\n\n @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n def circuit():\n return qml.sample(qml.PauliZ(wires=0))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"The number of shots has to be explicitly set on the device\",\n ):\n res = circuit()\n"
]
| [
[
"numpy.random.seed",
"numpy.array",
"numpy.sqrt"
]
]
|
AshkanTaghipour/ivadomed | [
"84c4e01831265b311c7b053ffdb19fb393fb135d"
]
| [
"ivadomed/testing.py"
]
| [
"import os\nimport copy\nimport nibabel as nib\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom tqdm import tqdm\n\nfrom ivadomed import metrics as imed_metrics\nfrom ivadomed import utils as imed_utils\nfrom ivadomed import visualize as imed_visualize\nfrom ivadomed import inference as imed_inference\nfrom ivadomed import uncertainty as imed_uncertainty\nfrom ivadomed.loader import utils as imed_loader_utils\nfrom ivadomed.object_detection import utils as imed_obj_detect\nfrom ivadomed.training import get_metadata\nfrom ivadomed.postprocessing import threshold_predictions\n\ncudnn.benchmark = True\n\n\ndef test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None, postprocessing=None):\n \"\"\"Main command to test the network.\n\n Args:\n model_params (dict): Model's parameters.\n dataset_test (imed_loader): Testing dataset.\n testing_params (dict): Testing parameters.\n log_directory (str): Folder where predictions are saved.\n device (torch.device): Indicates the CPU or GPU ID.\n cuda_available (bool): If True, CUDA is available.\n metric_fns (list): List of metrics, see :mod:`ivadomed.metrics`.\n postprocessing (dict): Contains postprocessing steps.\n\n Returns:\n dict: result metrics.\n \"\"\"\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it'] + 1\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo - 1))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, testing_params, path_3Dpred,\n cuda_available, i_monteCarlo, postprocessing)\n metric_mgr(preds_npy, gt_npy)\n # If uncertainty computation, don't apply it on last iteration for prediction\n if testing_params['uncertainty']['applied'] and (n_monteCarlo - 2 == i_monteCarlo):\n testing_params['uncertainty']['applied'] = False\n # COMPUTE UNCERTAINTY MAPS\n imed_uncertainty.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict\n\n\ndef run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None, postprocessing=None):\n \"\"\"Run inference on the test data and save results as nibabel files.\n\n Args:\n test_loader (torch DataLoader):\n model (nn.Module):\n model_params (dict):\n testing_params (dict):\n ofolder (str): Folder where predictions are saved.\n cuda_available (bool): If True, CUDA is available.\n i_monte_carlo 
(int): i_th Monte Carlo iteration.\n postprocessing (dict): Indicates postprocessing steps.\n\n Returns:\n ndarray, ndarray: Prediction, Ground-truth of shape n_sample, n_label, h, w, d.\n \"\"\"\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list, filenames = [], [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] == \"HeMISUnet\" or \\\n ('film_layers' in model_params and any(model_params['film_layers'])):\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"Modified3DUNet\" and model_params[\"attention\"] and ofolder:\n imed_visualize.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n task = imed_utils.get_task(model_params[\"name\"])\n if task == \"classification\":\n gt_npy_list.append(gt_samples.cpu().numpy())\n preds_npy_list.append(preds_cpu.data.numpy())\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if model_params[\"is_2d\"]:\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = list(filter(None, metadata_idx[0]['gt_filenames']))[0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool) and task != \"classification\":\n # save the completely processed file as a nifti file\n if ofolder:\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.rsplit(\"_\", 1)[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n postprocessing = None\n else:\n fname_pred = 
None\n output_nii = imed_inference.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=-1,\n postprocessing=postprocessing)\n output_data = output_nii.get_fdata().transpose(3, 0, 1, 2)\n preds_npy_list.append(output_data)\n\n gt = get_gt(filenames)\n gt_npy_list.append(gt)\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1 and ofolder:\n imed_visualize.save_color_labels(np.stack(pred_tmp_lst, -1),\n False,\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n filenames = metadata_idx[0]['gt_filenames']\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_inference.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n if ofolder:\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n postprocessing = None\n else:\n fname_pred = None\n # Choose only one modality\n output_nii = imed_inference.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=-1,\n postprocessing=postprocessing)\n output_data = output_nii.get_fdata().transpose(3, 0, 1, 2)\n preds_npy_list.append(output_data)\n\n gt = get_gt(metadata[0]['gt_filenames'])\n gt_npy_list.append(gt)\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1 and ofolder:\n imed_visualize.save_color_labels(pred_undo,\n False,\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list\n\n\ndef threshold_analysis(model_path, ds_lst, model_params, testing_params, metric=\"dice\", increment=0.1,\n fname_out=\"thr.png\", cuda_available=True):\n \"\"\"Run a threshold analysis to find the optimal threshold on a sub-dataset.\n\n Args:\n model_path (str): Model path.\n ds_lst (list): List of loaders.\n model_params (dict): Model's parameters.\n testing_params (dict): Testing parameters\n metric (str): Choice between \"dice\" and \"recall_specificity\". 
If \"recall_specificity\", then a ROC analysis\n is performed.\n increment (float): Increment between tested thresholds.\n fname_out (str): Plot output filename.\n cuda_available (bool): If True, CUDA is available.\n\n Returns:\n float: optimal threshold.\n \"\"\"\n if metric not in [\"dice\", \"recall_specificity\"]:\n raise ValueError('\\nChoice of metric for threshold analysis: dice, recall_specificity.')\n\n # Adjust some testing parameters\n testing_params[\"uncertainty\"][\"applied\"] = False\n\n # Load model\n model = torch.load(model_path)\n # Eval mode\n model.eval()\n\n # List of thresholds\n thr_list = list(np.arange(0.0, 1.0, increment))[1:]\n\n # Init metric manager for each thr\n metric_fns = [imed_metrics.recall_score,\n imed_metrics.dice_score,\n imed_metrics.specificity_score]\n metric_dict = {thr: imed_metrics.MetricManager(metric_fns) for thr in thr_list}\n\n # Load\n loader = DataLoader(ConcatDataset(ds_lst), batch_size=testing_params[\"batch_size\"],\n shuffle=False, pin_memory=True, sampler=None,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # Run inference\n preds_npy, gt_npy = run_inference(loader, model, model_params,\n testing_params,\n ofolder=None,\n cuda_available=cuda_available)\n\n print('\\nRunning threshold analysis to find optimal threshold')\n # Make sure the GT is binarized\n gt_npy = [threshold_predictions(gt, thr=0.5) for gt in gt_npy]\n # Move threshold\n for thr in tqdm(thr_list, desc=\"Search\"):\n preds_thr = [threshold_predictions(copy.deepcopy(pred), thr=thr) for pred in preds_npy]\n metric_dict[thr](preds_thr, gt_npy)\n\n # Get results\n tpr_list, fpr_list, dice_list = [], [], []\n for thr in thr_list:\n result_thr = metric_dict[thr].get_results()\n tpr_list.append(result_thr[\"recall_score\"])\n fpr_list.append(1 - result_thr[\"specificity_score\"])\n dice_list.append(result_thr[\"dice_score\"])\n\n # Get optimal threshold\n if metric == \"dice\":\n diff_list = dice_list\n else:\n diff_list = [tpr - fpr for tpr, fpr in zip(tpr_list, fpr_list)]\n\n optimal_idx = np.max(np.where(diff_list == np.max(diff_list)))\n optimal_threshold = thr_list[optimal_idx]\n print('\\tOptimal threshold: {}'.format(optimal_threshold))\n\n # Save plot\n print('\\tSaving plot: {}'.format(fname_out))\n if metric == \"dice\":\n # Run plot\n imed_metrics.plot_dice_thr(thr_list, dice_list, optimal_idx, fname_out)\n else:\n # Add 0 and 1 as extrema\n tpr_list = [0.0] + tpr_list + [1.0]\n fpr_list = [0.0] + fpr_list + [1.0]\n optimal_idx += 1\n # Run plot\n imed_metrics.plot_roc_curve(tpr_list, fpr_list, optimal_idx, fname_out)\n\n return optimal_threshold\n\n\ndef get_gt(filenames):\n \"\"\"Get ground truth data as numpy array.\n \n Args:\n filenames (list): List of ground truth filenames, one per class.\n Returns:\n ndarray: 4D numpy array.\n \"\"\"\n gt_lst = []\n for gt in filenames:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(nib.load(list(filter(None, filenames))[0]).get_fdata().shape))\n return np.array(gt_lst)\n"
]
| [
[
"torch.utils.data.ConcatDataset",
"numpy.max",
"numpy.array",
"torch.no_grad",
"numpy.stack",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
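The ivadomed `run_inference` code in the record above applies epistemic (MC-dropout) uncertainty by switching `Dropout` modules back into train mode at test time and repeating the forward pass over several Monte Carlo iterations. A minimal standalone sketch of that pattern follows; the toy model, shapes, and variable names are illustrative assumptions, not ivadomed's API.

```python
# MC-dropout sketch: keep the model in eval() mode but re-enable Dropout,
# then draw several stochastic forward passes and summarise their spread.
# The two-layer toy model below is hypothetical, not the ivadomed network.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(),
                      nn.Dropout(p=0.5), nn.Linear(32, 1))
model.eval()  # everything else (e.g. BatchNorm) stays in inference mode

# Same trick as in run_inference(): force Dropout layers back to train mode
for m in model.modules():
    if m.__class__.__name__.startswith('Dropout'):
        m.train()

x = torch.randn(8, 16)        # dummy batch
n_it = 10                     # number of Monte Carlo iterations
with torch.no_grad():
    samples = torch.stack([model(x) for _ in range(n_it)])  # (n_it, 8, 1)

mean_pred = samples.mean(dim=0)   # predictive mean
epistemic = samples.var(dim=0)    # sample variance ~ epistemic uncertainty
```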
2014mchidamb/AdversarialChess | [
"90b2d4245d01dcd4065e72eb201da5092d949da9"
]
| [
"train_model.py"
]
| [
"from model import Magikarp\nimport numpy as np\nimport tensorflow as tf\n\nconfig = {}\nconfig['batch_size'] = 64\nconfig['datafile'] = '../Data/training_data.hdf5'\nconfig['p_datafile'] = '../Data/tal_data.hdf5'\nconfig['full_boards_file'] = '../Data/full_boards.pkl'\nconfig['num_epochs'] = 10\nconfig['save_file'] = 'trained_model/trained_genadv.ckpt'\n\nwith tf.Session() as sess:\n\tmagikarp = Magikarp(config, sess)\n\tmagikarp.train()\n"
]
| [
[
"tensorflow.Session"
]
]
|
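The only API extracted for the record above is `tensorflow.Session`, i.e. the TF 1.x build-graph-then-run execution model used by `train_model.py`. A minimal sketch of that pattern is shown below; the toy graph is an illustrative assumption (it is not the AdversarialChess `Magikarp` model), and it assumes TF 2.x with the `tf.compat.v1` shim (under plain TF 1.x one would simply `import tensorflow as tf` and drop the eager-disable call).

```python
# TF1-style execution: build a static graph, then evaluate it inside a Session.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")  # graph input
w = tf.Variable(tf.ones([3, 1]), name="w")                 # trainable variable
y = tf.matmul(x, w)                                        # graph op

with tf.Session() as sess:                                 # execution context
    sess.run(tf.global_variables_initializer())            # initialise variables
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))   # -> [[6.]]
```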
USDA-VS/vSNP_archive | [
"a08e5db7f0acdf78af947e84181c559d04269d76"
]
| [
"vSNP_version1/functions.py"
]
| [
"import os\nimport sys\nimport shutil\nimport subprocess\nimport gzip\nimport glob\nimport csv\nimport json\nimport time\nimport regex\nimport re\nimport numpy as np\nimport pandas as pd\nimport zipfile\nimport xlsxwriter\nimport xlrd\nimport pysam\nimport vcf\nimport smtplib\nfrom multiprocessing import Pool\nfrom prettytable import PrettyTable\nfrom dask import delayed\nfrom itertools import repeat as itertools_repeat\nfrom numpy import mean\nfrom email.utils import formatdate\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nfrom distutils.dir_util import copy_tree\nfrom datetime import datetime\nfrom concurrent import futures\nfrom collections import OrderedDict\nfrom collections import Counter\nfrom collections import defaultdict\nfrom Bio.SeqIO.QualityIO import FastqGeneralIterator\nfrom Bio import SeqIO\nimport logging\nimport inspect\n\nfrom parameters import Get_Specie_Parameters\n\n# logging.basicConfig(format='%(levelname)s: %(message)s', filemode='w', filename='debug.log', level=logging.DEBUG)\n# logging.getLogger().addHandler(logging.StreamHandler()) #print to console\n\n# def warning_log(ex, inspect_getframeinfo, *args):\n# logging.warning(f'\\nException occured, file: {inspect_getframeinfo.filename}\\nfuction: {inspect.stack()[0][3]}, near line in script {inspect_getframeinfo.lineno} --> {type(ex).__name__, ex.args}\\nAdditional args: {args}\\n\\n')\n\n# def debug_log(ex, inspect_getframeinfo, *args):\n# logging.debug(f'\\nException occured, file: {inspect_getframeinfo.filename}\\nfuction: {inspect.stack()[0][3]}, near line in script {inspect_getframeinfo.lineno} --> {type(ex).__name__, ex.args}\\nAdditional args: {args}\\n\\n')\n\n\ndef run_loop(arg_options):\n\n root_dir = arg_options['root_dir']\n limited_cpu_count = arg_options['limited_cpu_count']\n\n startTime = datetime.now()\n ts = time.time()\n st = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')\n print(\"Start time: %s\" % st)\n\n list_of_files = glob.glob('*gz')\n\n for afile in list_of_files:\n prefix_name = re.sub('_.*', '', afile)\n prefix_name = re.sub('\\..*', '', prefix_name)\n print(prefix_name)\n if not os.path.exists(prefix_name):\n os.makedirs(prefix_name)\n shutil.move(afile, prefix_name)\n\n # placed at root\n # get file opened and give a header\n summary_file = root_dir + '/stat_alignment_summary_' + st + '.xlsx'\n workbook = xlsxwriter.Workbook(summary_file)\n worksheet = workbook.add_worksheet()\n row = 0\n col = 0\n top_row_header = [\"time_stamp\", \"sample_name\", \"species\", \"reference_sequence_name\", \"R1size\", \"R2size\", \"Q_ave_R1\", \"Q_ave_R2\", \"Q30_R1\", \"Q30_R2\", \"allbam_mapped_reads\", \"genome_coverage\", \"ave_coverage\", \"ave_read_length\", \"unmapped_reads\", \"unmapped_assembled_contigs\", \"good_snp_count\", \"mlst_type\", \"octalcode\", \"sbcode\", \"hexadecimal_code\", \"binarycode\"]\n for header in top_row_header:\n worksheet.write(row, col, header)\n col += 1\n ###\n\n # Cumulative stats\n path_found = False\n if os.path.isdir(\"/project/mycobacteria_brucella/mycobacterium/stats\"): #check bioinfo from server\n path_found = True\n copy_to = \"/project/mycobacteria_brucella/mycobacterium/stats\"\n elif os.path.isdir(\"/Volumes/bioinfo/project/mycobacteria_brucella/mycobacterium/stats\"): #check bioinfo from Mac\n path_found = True\n copy_to = \"/Volumes/bioinfo/project/mycobacteria_brucella/mycobacterium/stats\"\n else:\n copy_to = None\n print(\"Bioinfo 
not connected\")\n\n if path_found:\n try:\n summary_cumulative_file = copy_to + '/stat_alignment_culmulative_summary' + '.xlsx'\n summary_cumulative_file_temp = copy_to + '/stat_alignment_culmulative_summary-' + st + '-temp.xlsx'\n temp_folder = copy_to + '/temp'\n except OSError as ex:\n text = \"ERROR, Bioinfo unresponsive unable to copy to stats file\"\n msg = MIMEMultipart()\n msg['From'] = \"[email protected]\"\n msg['To'] = \"[email protected]\"\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = \"### No coverage file\"\n msg.attach(MIMEText(text))\n smtp = smtplib.SMTP('10.10.8.12')\n smtp.send_message(msg)\n smtp.quit()\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), \"Bioinfo unresponsive unable to copy to stats file\")\n ###\n\n directory_list = [item for item in os.listdir() if os.path.isdir(item)]\n total_samples = len(directory_list)\n directory_size = {}\n for folder in directory_list: #run files by size, smallest to largest\n size = sum(os.path.getsize(os.path.join(dirpath,filename)) for dirpath, dirnames, filenames in os.walk(folder) for filename in filenames)\n directory_size[folder] = size\n directory_size = {k: v for k, v in sorted(directory_size.items(), key=lambda x: x[1])}\n directory_list = [*directory_size] #ordered list\n lower_count = 0\n upper_count = 1\n row = 1\n while lower_count < total_samples:\n upper_count = lower_count + limited_cpu_count\n run_list = directory_list[lower_count:upper_count] #create a run list\n for i in run_list:\n directory_list.remove(i)\n total_samples = len(directory_list)\n print(run_list)\n\n print(\"Iterating directories\")\n frames = []\n if arg_options['debug_call']: #run just one sample at a time to debug\n for sample_name in run_list:\n print(\"DEBUGGING, SAMPLES RAN INDIVIDUALLY\")\n stat_summary = read_aligner(sample_name, arg_options)\n\n df_stat_summary = pd.DataFrame.from_dict(stat_summary, orient='index') #convert stat_summary to df\n frames.append(df_stat_summary) #frames to concatenate\n worksheet.write(row, 0, stat_summary.get('time_stamp', 'n/a'))\n worksheet.write(row, 1, stat_summary.get('sample_name', 'n/a'))\n worksheet.write(row, 2, stat_summary.get('species', 'n/a'))\n worksheet.write(row, 3, stat_summary.get('reference_sequence_name', 'n/a'))\n worksheet.write(row, 4, stat_summary.get('R1size', 'n/a'))\n worksheet.write(row, 5, stat_summary.get('R2size', 'n/a'))\n worksheet.write(row, 6, stat_summary.get('Q_ave_R1', 'n/a'))\n worksheet.write(row, 7, stat_summary.get('Q_ave_R2', 'n/a'))\n worksheet.write(row, 8, stat_summary.get('Q30_R1', 'n/a'))\n worksheet.write(row, 9, stat_summary.get('Q30_R2', 'n/a'))\n worksheet.write(row, 10, stat_summary.get('allbam_mapped_reads', 'n/a'))\n worksheet.write(row, 11, stat_summary.get('genome_coverage', 'n/a'))\n worksheet.write(row, 12, stat_summary.get('ave_coverage', 'n/a'))\n worksheet.write(row, 13, stat_summary.get('ave_read_length', 'n/a'))\n worksheet.write(row, 14, stat_summary.get('unmapped_reads', 'n/a'))\n worksheet.write(row, 15, stat_summary.get('unmapped_assembled_contigs', 'n/a'))\n worksheet.write(row, 16, stat_summary.get('good_snp_count', 'n/a'))\n worksheet.write(row, 17, stat_summary.get('mlst_type', 'n/a'))\n worksheet.write(row, 18, stat_summary.get('octalcode', 'n/a'))\n worksheet.write(row, 19, stat_summary.get('sbcode', 'n/a'))\n worksheet.write(row, 20, stat_summary.get('hexadecimal_code', 'n/a'))\n worksheet.write(row, 21, stat_summary.get('binarycode', 'n/a'))\n row += 1\n else: # run all in run_list in parallel\n 
print(\"SAMPLES RAN IN PARALLEL\")\n # itertools allows additional arguments to pass\n # Need to speed test which why is faster\n with futures.ProcessPoolExecutor(max_workers=limited_cpu_count) as pool:\n for stat_summary in pool.map(read_aligner, run_list, itertools_repeat(arg_options)):\n df_stat_summary = pd.DataFrame.from_dict(stat_summary, orient='index') #convert stat_summary to df\n frames.append(df_stat_summary) #frames to concatenate\n\n worksheet.write(row, 0, stat_summary.get('time_stamp', 'n/a'))\n worksheet.write(row, 1, stat_summary.get('sample_name', 'n/a'))\n worksheet.write(row, 2, stat_summary.get('species', 'n/a'))\n worksheet.write(row, 3, stat_summary.get('reference_sequence_name', 'n/a'))\n worksheet.write(row, 4, stat_summary.get('R1size', 'n/a'))\n worksheet.write(row, 5, stat_summary.get('R2size', 'n/a'))\n worksheet.write(row, 6, stat_summary.get('Q_ave_R1', 'n/a'))\n worksheet.write(row, 7, stat_summary.get('Q_ave_R2', 'n/a'))\n worksheet.write(row, 8, stat_summary.get('Q30_R1', 'n/a'))\n worksheet.write(row, 9, stat_summary.get('Q30_R2', 'n/a'))\n worksheet.write(row, 10, stat_summary.get('allbam_mapped_reads', 'n/a'))\n worksheet.write(row, 11, stat_summary.get('genome_coverage', 'n/a'))\n worksheet.write(row, 12, stat_summary.get('ave_coverage', 'n/a'))\n worksheet.write(row, 13, stat_summary.get('ave_read_length', 'n/a'))\n worksheet.write(row, 14, stat_summary.get('unmapped_reads', 'n/a'))\n worksheet.write(row, 15, stat_summary.get('unmapped_assembled_contigs', 'n/a'))\n worksheet.write(row, 16, stat_summary.get('good_snp_count', 'n/a'))\n worksheet.write(row, 17, stat_summary.get('mlst_type', 'n/a'))\n worksheet.write(row, 18, stat_summary.get('octalcode', 'n/a'))\n worksheet.write(row, 19, stat_summary.get('sbcode', 'n/a'))\n worksheet.write(row, 20, stat_summary.get('hexadecimal_code', 'n/a'))\n worksheet.write(row, 21, stat_summary.get('binarycode', 'n/a'))\n row += 1\n\n if not arg_options['quiet'] and path_found:\n try:\n open_check = open(summary_cumulative_file, 'a') #'a' is very important, 'w' will leave you with an empty file\n open_check.close()\n df_all = pd.read_excel(summary_cumulative_file)\n df_all_trans = df_all.T #indexed on column headers\n # save back the old and remake the working stats file\n shutil.move(summary_cumulative_file, '{}' .format(temp_folder + '/stat_backup' + st + '.xlsx'))\n sorter = list(df_all_trans.index) #list of original column order\n frames.insert(0, df_all_trans) #put as first item in list\n df_concat = pd.concat(frames, axis=1, sort=True) #cat frames\n df_sorted = df_concat.loc[sorter] #sort based on sorter order\n df_sorted.T.to_excel(summary_cumulative_file, index=False) # transpose before writing to excel, numerical index not needed\n except BlockingIOError as ex:\n sorter = list(df_stat_summary.index) #list of original column order\n df_concat = pd.concat(frames, axis=1, sort=True) #cat frames\n df_sorted = df_concat.loc[sorter] #sort based on sorter order\n df_sorted.T.to_excel(summary_cumulative_file_temp, index=False)\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()))\n pass\n except OSError as ex:\n sorter = list(df_stat_summary.index) #list of original column order\n df_concat = pd.concat(frames, axis=1, sort=True) #cat frames\n df_sorted = df_concat.loc[sorter] #sort based on sorter order\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()))\n try:\n df_sorted.T.to_excel(summary_cumulative_file_temp, index=False)\n except OSError as ex:\n # debug_log(ex, 
inspect.getframeinfo(inspect.currentframe()), \"##### UNABLE TO MAKE CONNECTION TO BIOINFO\")\n pass\n pass\n else:\n logging.debug(\"Path to cumulative stat summary file not found\")\n print(\"Path to cumulative stat summary file not found\")\n\n workbook.close()\n\n runtime = (datetime.now() - startTime)\n print(\"\\n\\nruntime: %s: \\n\" % runtime)\n\n if arg_options['email_list']:\n try:\n send_email_step1(arg_options['email_list'], runtime, path_found, summary_file, st)\n except TimeoutError as ex:\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), \"Unable to send email with current smtp setting\")\n pass\n\n\ndef reference_table():\n\n pretty_table = PrettyTable(['-r option', 'Species', 'NCBI identifier'])\n pretty_table.add_row(['af', 'Mycobacterium_bovis_AF2122/97', 'NC_002945.4'])\n pretty_table.add_row(['h37', 'Mycobacterium tuberculosis H37Rv', 'NC_000962.3'])\n pretty_table.add_row(['ab1', 'Brucella abortus biovar 1 str. 9-941', 'NC_006932.1, NC_006933.1'])\n pretty_table.add_row(['ab3', 'Brucella abortus strain BER', 'NZ_CP007682.1, NZ_CP007683.1'])\n pretty_table.add_row(['suis1', 'Brucella suis 1330', 'NC_017251.1, NC_017250.1'])\n pretty_table.add_row(['suis2', 'Brucella suis ATCC 23445', 'NC_010169.1, NC_010167.1'])\n pretty_table.add_row(['suis3', 'Brucella suis bv. 3 str. 686', 'NZ_CP007719.1, NZ_CP007718.1'])\n pretty_table.add_row(['mel1', 'Brucella melitensis bv. 1 str. 16M', 'NC_003317.1, NC_003318.1'])\n pretty_table.add_row(['mel1b', 'Brucella melitensis BwIM_SOM_36b', 'NZ_CP018508.1, NZ_CP018509.1'])\n pretty_table.add_row(['mel2', 'Brucella melitensis ATCC 23457', 'NC_012441.1, NC_012442.1'])\n pretty_table.add_row(['mel3', 'Brucella melitensis bv. 3 str. Ether', 'NZ_CP007760, NZ_CP007761'])\n pretty_table.add_row(['canis', 'Brucella canis ATCC 23365', 'NC_010103.1, NC_010104.1'])\n pretty_table.add_row(['ceti1', 'Bceti1Cudo', 'Bceti1Cudo inhouse'])\n pretty_table.add_row(['ceti2', 'Brucella ceti TE10759-12', 'NC_022905.1, NC_022906.1'])\n pretty_table.add_row(['ovis', 'Brucella ovis ATCC 25840', 'NC_009505.1, NC_009504.1'])\n pretty_table.add_row(['para-NC_002944', 'Mycobacterium avium subsp. paratuberculosis str. k10', 'NC_002944.2'])\n pretty_table.add_row(['para-CP033688', 'Mycobacterium avium subsp. paratuberculosis strain Telford', 'CP033688.1'])\n pretty_table.add_row(['typhimurium-14028S', 'Salmonella enterica subsp. enterica serovar Typhimurium str. 14028S', 'NC_016856.1, NC_016855.1(plasmid)'])\n pretty_table.add_row(['typhimurium-LT2', 'Salmonella enterica subsp. enterica serovar Typhimurium str. LT2', 'AE006468.2'])\n pretty_table.add_row(['heidelberg-SL476', 'Salmonella enterica subsp. enterica serovar Heidelberg str. SL476', 'NC_011083.1'])\n pretty_table.add_row(['strepequi', 'Streptococcus equi subsp. zooepidemicus ATCC 35246', 'NC_017582'])\n pretty_table.add_row(['blockley', 'Salmonella enterica subsp. enterica serovar Blockley strain 79-1229', 'GCF_002283115.1_ASM228311v1_genomic'])\n pretty_table.add_row(['infantis-FSIS1502169', 'Salmonella enterica subsp. enterica serovar Infantis strain FSIS1502169', 'CP016406.1'])\n pretty_table.add_row(['dublin-ATCC39184', 'Salmonella enterica subsp. enterica serovar Dublin str. ATCC 39184', 'CP019179.1'])\n pretty_table.add_row(['kentucky-SA20030505', 'Salmonella enterica subsp. enterica serovar Kentucky str. SA20030505 chromosome', 'CP022500.1'])\n pretty_table.add_row(['newport-USDA-ARS-USMARC-1925', 'Salmonella enterica subsp. enterica serovar Newport str. 
USDA-ARS-USMARC-1925 chromosome', 'CP025232.1'])\n pretty_table.add_row(['senftenberg-NCTC10080', 'Salmonella enterica subsp. enterica serovar Senftenberg strain NCTC10080 genome assembly', 'LS483465.1'])\n pretty_table.add_row(['enteritidis-Durban', 'Salmonella enterica subsp. enterica serovar Enteritidis strain Durban', 'CP007507.1'])\n pretty_table.add_row(['montevideo-507440-20', 'Salmonella enterica subsp. enterica serovar Montevideo str. 507440-20', 'CP007530.1'])\n pretty_table.add_row(['te_atcc35865', 'Taylorella equigenitalis ATCC 35865', 'NC_018108.1'])\n pretty_table.add_row(['te_09-0932', 'Taylorella equigenitalis strain 09-0932', 'NZ_CP021201.1'])\n pretty_table.add_row(['te_89-0490', 'Taylorella equigenitalis strain 89-0490', 'NZ_CP021199.1'])\n pretty_table.add_row(['te_92-0972', 'Taylorella equigenitalis strain 92-0972', 'NZ_CP021060.1'])\n pretty_table.add_row(['te_98-0554', 'Taylorella equigenitalis strain 98-0554', 'NZ_CP021246.1'])\n pretty_table.add_row(['te_mce9', 'Taylorella equigenitalis MCE9', 'NC_014914.1'])\n pretty_table.add_row(['flu', 'H7N3', 'segments 1-8'])\n pretty_table.add_row(['newcaste', '18-016505-001-fusion-HN', '18-016505-001-fusion-HN'])\n pretty_table.add_row(['belize', 'Newcastle disease virus isolate Belize (Spanish Lookout)/4224-3/2008', 'KF767466.1'])\n pretty_table.add_row(['gua', 'Newcastle disease virus isolate Guatemala chicken/Guatemala/Gua1-1407/2018', 'Gua1_1407_2018'])\n\n return pretty_table\n\n\ndef fix_vcf(each_vcf, arg_options):\n mal = []\n # Fix common VCF errors\n temp_file = each_vcf + \".temp\"\n write_out = open(temp_file, 'w') #r+ used for reading and writing to the same file\n initial_file_time_stats = os.stat(each_vcf)\n with open(each_vcf, 'r') as file:\n try:\n for line in file:\n if line.rstrip(): # true if not empty line'^$'\n line = line.rstrip() #remove right white space\n line = re.sub('\"AC=', 'AC=', line)\n line = re.sub('\"\"', '\"', line)\n line = re.sub('\"\"', '\"', line)\n line = re.sub('\"\"', '\"', line)\n line = re.sub('\"$', '', line)\n line = re.sub('GQ:PL\\t\"', 'GQ:PL\\t', line)\n line = re.sub('[0-9]+\\tGT\\t.\\/.$', '999\\tGT:AD:DP:GQ:PL\\t1/1:0,80:80:99:2352,239,0', line)\n line = re.sub('^\"', '', line)\n if line.startswith('##') and line.endswith('\"'):\n line = re.sub('\"$', '', line)\n if line.startswith('##'):\n line = line.split('\\t')\n line = ''.join(line[0])\n if not line.startswith('##'):\n line = re.sub('\"', '', line)\n line = line.split('\\t')\n line = \"\\t\".join(line[0:10])\n print(line, file=write_out)\n else:\n print(line, file=write_out)\n except IndexError as ex:\n mal.append(\"##### IndexError: Deleting corrupt VCF file: \" + each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf, \"Deleting corrupt VCF file\")\n os.remove(each_vcf)\n except UnicodeDecodeError as ex:\n mal.append(\"##### UnicodeDecodeError: Deleting corrupt VCF file: \" + each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf, \"Deleting corrupt VCF file\")\n os.remove(each_vcf)\n\n write_out.close()\n os.rename(temp_file, each_vcf)\n # revert timestamp to original allows elites to properly sort on file modification time\n os.utime(each_vcf, times=(initial_file_time_stats.st_mtime, initial_file_time_stats.st_mtime))\n return mal\n\n\ndef read_aligner(sample_name, arg_options):\n\n os.chdir(arg_options['root_dir'] + \"/\" + sample_name)\n sample_directory = str(os.getcwd())\n pair_check = len(glob.glob('*_R2*fastq.gz'))\n if pair_check > 0:\n R1 = 
glob.glob('*_R1*fastq.gz')\n R2 = glob.glob('*_R2*fastq.gz')\n paired = True\n else:\n R1 = glob.glob('*fastq.gz')\n R2 = None\n paired = False\n arg_options['paired'] = paired\n\n if len(R1) > 1:\n print(\"#### Check for a duplicate file in {}\" .format(sample_name))\n sys.exit(0)\n\n os.makedirs(\"zips\")\n shutil.move(R1[0], \"zips\")\n arg_options['R1'] = sample_directory + \"/zips/\" + R1[0]\n if paired:\n shutil.move(R2[0], \"zips\")\n arg_options['R2'] = sample_directory + \"/zips/\" + R2[0]\n else:\n arg_options['R2'] = None\n\n read_quality_stats = {}\n print(\"Getting mean for {}\" .format(arg_options['R1']))\n handle = gzip.open(arg_options['R1'], \"rt\")\n mean_quality_list = []\n for rec in SeqIO.parse(handle, \"fastq\"):\n mean_q = get_read_mean(rec)\n mean_quality_list.append(mean_q)\n\n read_quality_stats[\"Q_ave_R1\"] = \"{:.1f}\" .format(mean(mean_quality_list))\n thirty_or_greater_count = sum(i > 29 for i in mean_quality_list)\n read_quality_stats[\"Q30_R1\"] = \"{:.1%}\" .format(thirty_or_greater_count / len(mean_quality_list))\n\n if paired:\n print(\"Getting mean for {}\" .format(arg_options['R2']))\n handle = gzip.open(arg_options['R2'], \"rt\")\n mean_quality_list = []\n for rec in SeqIO.parse(handle, \"fastq\"):\n mean_q = get_read_mean(rec)\n mean_quality_list.append(mean_q)\n\n read_quality_stats[\"Q_ave_R2\"] = \"{:.1f}\" .format(mean(mean_quality_list))\n thirty_or_greater_count = sum(i > 29 for i in mean_quality_list)\n read_quality_stats[\"Q30_R2\"] = \"{:.1%}\" .format(thirty_or_greater_count / len(mean_quality_list))\n arg_options['read_quality_stats'] = read_quality_stats\n\n arg_options['sample_name'] = sample_name\n arg_options = species_selection_step1(arg_options)\n if arg_options['species'] is None:\n arg_options = {**arg_options, **read_quality_stats}\n R1size = sizeof_fmt(os.path.getsize(arg_options['R1']))\n if paired:\n R2size = sizeof_fmt(os.path.getsize(arg_options['R2']))\n arg_options['R1size'] = R1size\n arg_options['R2size'] = R2size\n return arg_options\n try:\n stat_summary = align_reads(arg_options)\n for k, v in stat_summary.items():\n print(\"%s: %s\" % (k, v))\n return(stat_summary)\n except AttributeError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name, \"Unable to return stat_summary\")\n return arg_options\n\n\ndef get_species(arg_options):\n\n #species = corresponding NCBI accession\n species_cross_reference = {}\n species_cross_reference[\"salmonella\"] = [\"016856, 016855\"]\n species_cross_reference[\"bovis\"] = [\"AF2122_NC002945\", \"00879\"]\n species_cross_reference[\"af\"] = [\"NC_002945.4\"]\n species_cross_reference[\"h37\"] = [\"000962\", \"002755\", \"009525\", \"018143\"]\n species_cross_reference[\"para-NC_002944\"] = [\"NC_002944\"]\n species_cross_reference[\"para-CP033688\"] = [\"CP033688\"]\n species_cross_reference[\"ab1\"] = [\"006932\", \"006933\"]\n species_cross_reference[\"ab3\"] = [\"007682\", \"007683\"]\n species_cross_reference[\"canis\"] = [\"010103\", \"010104\"]\n species_cross_reference[\"ceti1\"] = [\"Bceti1Cudo\"]\n species_cross_reference[\"ceti2\"] = [\"022905\", \"022906\"]\n species_cross_reference[\"mel1\"] = [\"003317\", \"003318\"]\n species_cross_reference[\"mel1b\"] = [\"CP018508\", \"CP018509\"]\n species_cross_reference[\"mel2\"] = [\"012441\", \"012442\"]\n species_cross_reference[\"mel3\"] = [\"007760\", \"007761\"]\n species_cross_reference[\"ovis\"] = [\"009504\", \"009505\"]\n species_cross_reference[\"neo\"] = [\"KN046827\"]\n 
species_cross_reference[\"suis1\"] = [\"017250\", \"017251\"]\n species_cross_reference[\"suis2\"] = [\"NC_010169\", \"NC_010167\"]\n species_cross_reference[\"suis3\"] = [\"007719\", \"007718\"]\n species_cross_reference[\"suis4\"] = [\"B-REF-BS4-40\"]\n species_cross_reference[\"te_09-0932\"] = [\"CP021201\"]\n species_cross_reference[\"te_89-0490\"] = [\"CP021199\"]\n species_cross_reference[\"te_92-0972\"] = [\"CP021060\"]\n species_cross_reference[\"te_98-0554\"] = [\"CP021246\"]\n species_cross_reference[\"te_atcc35865\"] = [\"NC_018108\"]\n species_cross_reference[\"te_mce9\"] = [\"NC_014914\"]\n species_cross_reference[\"gua\"] = [\"Gua1_1407_2018\"]\n vcf_list = glob.glob('*vcf')\n for each_vcf in vcf_list:\n print(each_vcf)\n vcf_reader = vcf.Reader(open(each_vcf, 'r'))\n print(\"single_vcf %s\" % each_vcf)\n for record in vcf_reader:\n header = record.CHROM\n for key, vlist in species_cross_reference.items():\n for li in vlist:\n if li in header:\n return (key)\n\n\ndef species_selection_step1(arg_options):\n all_parameters = Get_Specie_Parameters()\n\n if arg_options['species']:\n species_selection = arg_options['species']\n print(\"Sample will be ran as: {}\" .format(species_selection))\n parameters, genotype_codes = all_parameters.choose(species_selection)\n else:\n best_ref_found = best_reference([arg_options['R1'], arg_options['R2']])\n arg_options['species'] = best_ref_found\n print(\"Sample will be ran as {}\" .format(best_ref_found))\n parameters, genotype_codes = all_parameters.choose(best_ref_found)\n\n if parameters['species'] is None:\n print(\"\\n#### ERROR #####\\nNo specie parameters found for: \\n\\t{} \\n\\t{}\\n\\n\" .format(arg_options['R1'], arg_options['R2']))\n arg_options['reference_sequence_name'] = best_ref_found\n arg_options.update(parameters)\n return arg_options\n elif parameters:\n #shutil.copy2(parameters[\"reference\"], arg_options['root_dir'])\n arg_options.update(parameters)\n return arg_options\n else:\n print(\"### See species_selection_step1 function\")\n arg_options['species'] = None\n arg_options['reference_sequence_name'] = best_ref_found\n arg_options.update(parameters)\n return arg_options\n\n\ndef best_reference(fastq_list):\n\n '''Use oligos to determine species. Most often if the absents of a single oligo from a set specific for either brucella or bovis will confer species type. Some species will the absents of more than one oligo. 
Oligo findings are translated to binary patterns.'''\n print(\"\\nFinding the best reference\\n\")\n write_out = open(\"best_reference.txt\", 'w')\n '''get the species'''\n oligo_dictionary = {}\n oligo_dictionary[\"01_ab1\"] = \"AATTGTCGGATAGCCTGGCGATAACGACGC\"\n oligo_dictionary[\"02_ab3\"] = \"CACACGCGGGCCGGAACTGCCGCAAATGAC\"\n oligo_dictionary[\"03_ab5\"] = \"GCTGAAGCGGCAGACCGGCAGAACGAATAT\"\n oligo_dictionary[\"04_mel\"] = \"TGTCGCGCGTCAAGCGGCGTGAAATCTCTG\"\n oligo_dictionary[\"05_suis1\"] = \"TGCGTTGCCGTGAAGCTTAATTCGGCTGAT\"\n oligo_dictionary[\"06_suis2\"] = \"GGCAATCATGCGCAGGGCTTTGCATTCGTC\"\n oligo_dictionary[\"07_suis3\"] = \"CAAGGCAGATGCACATAATCCGGCGACCCG\"\n oligo_dictionary[\"08_ceti1\"] = \"GTGAATATAGGGTGAATTGATCTTCAGCCG\"\n oligo_dictionary[\"09_ceti2\"] = \"TTACAAGCAGGCCTATGAGCGCGGCGTGAA\"\n oligo_dictionary[\"10_canis4\"] = \"CTGCTACATAAAGCACCCGGCGACCGAGTT\"\n oligo_dictionary[\"11_canis\"] = \"ATCGTTTTGCGGCATATCGCTGACCACAGC\"\n oligo_dictionary[\"12_ovis\"] = \"CACTCAATCTTCTCTACGGGCGTGGTATCC\"\n oligo_dictionary[\"13_ether2\"] = \"CGAAATCGTGGTGAAGGACGGGACCGAACC\"\n oligo_dictionary[\"14_63B1\"] = \"CCTGTTTAAAAGAATCGTCGGAACCGCTCT\"\n oligo_dictionary[\"15_16M0\"] = \"TCCCGCCGCCATGCCGCCGAAAGTCGCCGT\"\n oligo_dictionary[\"16_mel1b\"] = \"TCTGTCCAAACCCCGTGACCGAACAATAGA\" #added 2018-01-30\n oligo_dictionary[\"17_tb157\"] = \"CTCTTCGTATACCGTTCCGTCGTCACCATGGTCCT\"\n oligo_dictionary[\"18_tb7\"] = \"TCACGCAGCCAACGATATTCGTGTACCGCGACGGT\"\n oligo_dictionary[\"19_tbbov\"] = \"CTGGGCGACCCGGCCGACCTGCACACCGCGCATCA\"\n oligo_dictionary[\"20_tb5\"] = \"CCGTGGTGGCGTATCGGGCCCCTGGATCGCGCCCT\"\n oligo_dictionary[\"21_tb2\"] = \"ATGTCTGCGTAAAGAAGTTCCATGTCCGGGAAGTA\"\n oligo_dictionary[\"22_tb3\"] = \"GAAGACCTTGATGCCGATCTGGGTGTCGATCTTGA\"\n oligo_dictionary[\"23_tb4\"] = \"CGGTGTTGAAGGGTCCCCCGTTCCAGAAGCCGGTG\"\n oligo_dictionary[\"24_tb6\"] = \"ACGGTGATTCGGGTGGTCGACACCGATGGTTCAGA\"\n oligo_dictionary[\"25_para\"] = \"CCTTTCTTGAAGGGTGTTCG\"\n oligo_dictionary[\"26_para_sheep\"] = \"CGTGGTGGCGACGGCGGCGGGCCTGTCTAT\"\n oligo_dictionary[\"27_para_cattle\"] = \"TCTCCTCGGTCGGTGATTCGGGGGCGCGGT\"\n\n brucella_identifications = {}\n brucella_identifications[\"1111111111111111\"] = \"odd\" #Unexpected findings\n brucella_identifications[\"0111111111111111\"] = \"ab1\" #Brucella abortus bv 1, 2 or 4\n brucella_identifications[\"1011111111111111\"] = \"ab3\" #Brucella abortus bv 3\n brucella_identifications[\"1101111111111111\"] = \"ab1\" #Brucella abortus bv 5, 6 or 9\n brucella_identifications[\"1110111111111101\"] = \"mel1\"\n brucella_identifications[\"0000010101101101\"] = \"mel1\"\n brucella_identifications[\"1110111111111100\"] = \"mel1b\" #added 2018-01-30\n brucella_identifications[\"0000010101101100\"] = \"mel1b\" #added 2018-01-30\n brucella_identifications[\"1110111111111011\"] = \"mel2\"\n brucella_identifications[\"0000010101101001\"] = \"mel2\"\n brucella_identifications[\"0100010101101001\"] = \"mel2\"\n brucella_identifications[\"1110011111101011\"] = \"mel2\"\n brucella_identifications[\"1110111111110111\"] = \"mel3\"\n brucella_identifications[\"1110011111100111\"] = \"mel3\"\n brucella_identifications[\"1111011111111111\"] = \"suis1\"\n brucella_identifications[\"1111101111111111\"] = \"suis2\"\n brucella_identifications[\"1111110111111101\"] = \"suis3\"\n brucella_identifications[\"1111111011111111\"] = \"ceti1\"\n brucella_identifications[\"1111111001111111\"] = \"ceti1\"\n brucella_identifications[\"1111111101111111\"] = \"ceti2\"\n brucella_identifications[\"1111111110111101\"] = 
\"suis4\"\n brucella_identifications[\"1111111110011101\"] = \"canis\"\n brucella_identifications[\"1111111111101111\"] = \"ovis\"\n\n bovis_identifications = {}\n bovis_identifications[\"11101111\"] = \"h37\" #tb1\n bovis_identifications[\"11101101\"] = \"h37\" #tb1\n bovis_identifications[\"01100111\"] = \"h37\" #tb2\n bovis_identifications[\"01101011\"] = \"h37\" #tb3\n bovis_identifications[\"11101011\"] = \"h37\" #tb3\n bovis_identifications[\"01101111\"] = \"h37\" #tb4a\n bovis_identifications[\"01101101\"] = \"h37\" #tb4b\n bovis_identifications[\"11101101\"] = \"h37\" #tb4b\n bovis_identifications[\"01101111\"] = \"h37\" #tb4b\n bovis_identifications[\"11111111\"] = \"h37\" #tb5\n bovis_identifications[\"11001111\"] = \"h37\" #tb6\n bovis_identifications[\"10101110\"] = \"h37\" #tb7\n bovis_identifications[\"11001110\"] = \"af\" #bovis\n bovis_identifications[\"11011110\"] = \"af\" #bovis\n bovis_identifications[\"11001100\"] = \"af\" #bovis\n \n para_identifications = {}\n para_identifications[\"110\"] = \"para-CP033688\"\n para_identifications[\"101\"] = \"para-NC_002944\"\n \n count_summary = {}\n\n with futures.ProcessPoolExecutor() as pool:\n for v, count in pool.map(finding_best_ref, oligo_dictionary.values(), itertools_repeat(fastq_list)):\n for k, value in oligo_dictionary.items():\n if v == value:\n count_summary.update({k: count})\n count_summary = OrderedDict(sorted(count_summary.items()))\n\n count_list = []\n for v in count_summary.values():\n count_list.append(v)\n brucella_sum = sum(count_list[:16])\n bovis_sum = sum(count_list[16:24])\n para_sum = sum(count_list[24:])\n print(\"Best reference Brucella counts:\", file=write_out)\n for i in count_list[:16]:\n print(i, end=',', file=write_out)\n print(\"\\nBest reference TB counts:\", file=write_out)\n for i in count_list[16:24]:\n print(i, end=',', file=write_out)\n\n print(\"\\nBest reference Para counts:\", file=write_out)\n for i in count_list[24:]:\n print(i, end=',', file=write_out)\n\n #Binary dictionary\n binary_dictionary = {}\n for k, v in count_summary.items():\n if v > 1:\n binary_dictionary.update({k: 1})\n else:\n binary_dictionary.update({k: 0})\n binary_dictionary = OrderedDict(sorted(binary_dictionary.items()))\n\n binary_list = []\n for v in binary_dictionary.values():\n binary_list.append(v)\n brucella_binary = binary_list[:16]\n brucella_string = ''.join(str(e) for e in brucella_binary)\n bovis_binary = binary_list[16:24]\n bovis_string = ''.join(str(e) for e in bovis_binary)\n para_binary = binary_list[24:]\n para_string = ''.join(str(e) for e in para_binary)\n\n if brucella_sum > 3:\n if brucella_string in brucella_identifications:\n print(\"Brucella group, species %s\" % brucella_identifications[brucella_string])\n print(\"\\n\\nBrucella group, species %s\" % brucella_identifications[brucella_string], file=write_out)\n return(brucella_identifications[brucella_string]) # return to set parameters\n else:\n print(\"Brucella group, but no match\")\n print(\"\\n\\nBrucella group, but no match\", file=write_out)\n return(\"Brucella group, but no match\")\n elif bovis_sum > 3:\n if bovis_string in bovis_identifications:\n print(\"TB group, species %s\" % bovis_identifications[bovis_string])\n print(\"\\n\\nTB group, species %s\" % bovis_identifications[bovis_string], file=write_out)\n return(bovis_identifications[bovis_string]) # return to set parameters\n else:\n print(\"TB group, but no match\")\n print(\"\\n\\nTB group, but no match\", file=write_out)\n return(\"TB group, but no match\")\n elif 
para_sum >= 1:\n if para_string in para_identifications:\n print(\"Para group, species %s\" % para_identifications[para_string])\n print(\"\\n\\nPara group, species %s\" % para_identifications[para_string], file=write_out)\n return(para_identifications[para_string]) # return to set parameters\n else:\n print(\"M. paratuberculosis group, but no match\")\n print(\"\\n\\nNo match\", file=write_out)\n return(\"M. paratuberculosis group, but no match\")\n else:\n print(\"Unable to find a best reference species or group\")\n return(\"Unable to find a best reference species or group\")\n\n write_out.close()\n\n\ndef finding_best_ref(v, fastq_list):\n count = 0\n for fastq in fastq_list:\n with gzip.open(fastq, 'rt') as in_handle:\n # all 3, title and seq and qual, were needed\n for title, seq, qual in FastqGeneralIterator(in_handle):\n count += seq.count(v)\n return(v, count)\n\n\ndef align_reads(arg_options):\n paired = arg_options['paired']\n working_directory = os.getcwd()\n print(\"Working on: {}\" .format(working_directory))\n ts = time.time()\n st = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')\n R1 = arg_options['R1']\n if paired:\n R2 = arg_options['R2']\n if arg_options[\"species\"] is None:\n stat_summary = {}\n R1size = sizeof_fmt(os.path.getsize(R1))\n if paired:\n R2size = sizeof_fmt(os.path.getsize(R2))\n stat_summary[\"R2size\"] = R2size\n stat_summary[\"time_stamp\"] = st\n stat_summary[\"sample_name\"] = arg_options[\"sample_name\"]\n stat_summary[\"species\"] = \"NOT_FOUND\"\n stat_summary[\"reference_sequence_name\"] = \"N/A\"\n stat_summary[\"R1size\"] = R1size\n stat_summary[\"allbam_mapped_reads\"] = \"CHECK SAMPLE *****************************************\"\n stat_summary.update(arg_options['read_quality_stats'])\n return(stat_summary)\n else:\n startTime = datetime.now()\n print(\"\\n\\n*** START ***\\n\")\n print(\"Start time: %s\" % startTime)\n sample_name = arg_options[\"sample_name\"]\n print(\"species: %s\" % arg_options[\"species\"])\n if arg_options[\"species\"] in [\"ab1\", \"ab3\", \"suis1\", \"suis2\", \"suis3\", \"suis4\", \"mel1\", \"mel1b\", \"mel2\", \"mel3\", \"canis\", \"ceti1\", \"ceti2\"]:\n print(\"Brucella\")\n mlst(arg_options)\n elif arg_options[\"species\"] in [\"h37\", \"af\"]: #removed bovis\n print(\"TB\")\n spoligo(arg_options)\n os.chdir(working_directory)\n shutil.copy(arg_options[\"reference\"], working_directory)\n sample_reference = glob.glob(working_directory + '/*fasta')\n\n print(\"reference: %s\" % sample_reference)\n ref = re.sub('\\.fasta', '', os.path.basename(sample_reference[0]))\n if len(sample_reference) != 1:\n print(\"### ERROR reference not available or too many\")\n sys.exit(0)\n sample_reference = sample_reference[0]\n\n print(\"--\")\n print(\"sample name: %s\" % sample_name)\n print(\"sample reference: %s\" % sample_reference)\n print(\"Read 1: %s\" % R1)\n if paired:\n print(\"Read 2: %s\\n\" % R2)\n print(\"working_directory: %s\" % working_directory)\n print(\"--\")\n\n loc_sam = working_directory + \"/\" + sample_name\n os.system(\"samtools faidx {}\" .format(sample_reference))\n os.system(\"picard CreateSequenceDictionary REFERENCE={} OUTPUT={}\" .format(sample_reference, working_directory + \"/\" + ref + \".dict\"))\n os.system(\"bwa index {}\" .format(sample_reference))\n samfile = loc_sam + \".sam\"\n bamfile = loc_sam + \".bam\"\n abyss_out = loc_sam + \"-unmapped_contigs.fasta\"\n unmapsam = loc_sam + \"-unmapped.sam\"\n metrics = loc_sam + \"-metrics.txt\"\n unmapped_read1 = loc_sam + 
\"-unmapped_R1.fastq\"\n unmapped_read1gz = loc_sam + \"-unmapped_R1.fastq.gz\"\n if paired:\n unmapped_read2 = loc_sam + \"-unmapped_R2.fastq\"\n unmapped_read2gz = loc_sam + \"-unmapped_R2.fastq.gz\"\n sortedbam = loc_sam + \"-sorted.bam\"\n nodupbam = loc_sam + \"-nodup.bam\"\n unfiltered_hapall = loc_sam + \"-unfiltered_hapall.vcf\"\n mapq_fix = loc_sam + \"-mapq_fix_hapall.vcf\"\n hapall = loc_sam + \"-hapall.vcf\"\n zero_coverage_vcf = loc_sam + \"_zc.vcf\"\n\n #########################################################\n print(\"\\n@@@ BWA mem: {}\".format(sample_name))\n if paired:\n os.system(r'bwa mem -M -R \"@RG\\tID:%s\\tSM:%s\\tPL:ILLUMINA\\tPI:250\" -t 16 %s %s %s > %s' % (sample_name, sample_name, sample_reference, R1, R2, samfile))\n else:\n os.system(r'bwa mem -M -R \"@RG\\tID:%s\\tSM:%s\\tPL:ILLUMINA\\tPI:250\" -t 16 %s %s > %s' % (sample_name, sample_name, sample_reference, R1, samfile))\n os.system(\"samtools view -Sb {} -o {}\" .format(samfile, bamfile))\n os.system(\"samtools sort {} -o {}\" .format(bamfile, sortedbam))\n os.system(\"samtools index {}\" .format(sortedbam))\n\n print(\"\\n@@@ Remove Duplicate Reads: {}\" .format(sample_name))\n os.system(\"picard MarkDuplicates INPUT={} OUTPUT={} METRICS_FILE={} ASSUME_SORTED=true REMOVE_DUPLICATES=true\" .format(sortedbam, nodupbam, metrics))\n os.system(\"samtools index {}\" .format(nodupbam))\n\n print(\"\\n@@@ Calling SNPs with FreeBayes Parallel: {}\" .format(sample_name))\n #os.system(\"freebayes -f {} {} > {}\" .format(sample_reference, nodupbam, unfiltered_hapall))\n chrom_ranges = open(\"chrom_ranges.txt\", 'w')\n for record in SeqIO.parse(sample_reference, \"fasta\"):\n chrom = record.id\n total_len = len(record.seq)\n min_number = 0\n step = 100000\n if step < total_len:\n for chunk in range(min_number, total_len, step)[1:]:\n print(\"{}:{}-{}\".format(chrom, min_number, chunk), file=chrom_ranges)\n min_number = chunk\n print(\"{}:{}-{}\".format(chrom, min_number, total_len), file=chrom_ranges)\n\n # for pos in list(range(last_number, total_len, 100000))[1:]:\n # print(\"{}:{}-{}\" .format(chrom, last_number, pos), file=chrom_ranges)\n # last_number = pos\n # print(\"{}:{}-{}\" .format(chrom, pos, total_len), file=chrom_ranges)\n chrom_ranges.close()\n os.system(r'freebayes-parallel chrom_ranges.txt 8 -E -1 -e 1 -u --strict-vcf -f %s %s > %s' % (sample_reference, nodupbam, unfiltered_hapall))\n # \"fix\" MQ notation in VCF to match GATK output\n write_fix = open(mapq_fix, 'w+')\n with open(unfiltered_hapall, 'r') as unfiltered:\n for line in unfiltered:\n line = line.strip()\n new_line = re.sub(r';MQM=', r';MQ=', line)\n new_line = re.sub(r'ID=MQM,', r'ID=MQ,', new_line)\n print(new_line, file=write_fix)\n write_fix.close()\n # remove clearly poor positions\n os.system(r'vcffilter -f \"QUAL > 20\" %s > %s' % (mapq_fix, hapall))\n\n print(\"\\n@@@ Assemble Unmapped Reads: {}\" .format(sample_name))\n os.system(\"samtools view -h -f4 -T {} {} -o {}\".format(sample_reference, nodupbam, unmapsam))\n if paired:\n os.system(\"picard SamToFastq INPUT={} FASTQ={} SECOND_END_FASTQ={}\".format(unmapsam, unmapped_read1, unmapped_read2))\n else:\n os.system(\"picard SamToFastq INPUT={} FASTQ={}\".format(unmapsam, unmapped_read1))\n\n abyss_contig_count = 0\n try:\n if paired:\n os.system(\"ABYSS --out {} --coverage 5 --kmer 64 {} {}\" .format(abyss_out, unmapped_read1, unmapped_read2))\n with open(abyss_out) as f:\n for line in f:\n abyss_contig_count += line.count(\">\")\n else:\n os.system(\"ABYSS --out {} --coverage 5 
--kmer 64 {}\" .format(abyss_out, unmapped_read1))\n with open(abyss_out) as f:\n for line in f:\n abyss_contig_count += line.count(\">\")\n except FileNotFoundError as ex:\n abyss_contig_count = 0\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name, \"Zero Abyss contigs\")\n\n # Full bam stats\n stat_out = open(\"stat_align.txt\", 'w')\n stat_out.write(os.popen(\"samtools idxstats {} \" .format(sortedbam)).read())\n stat_out.close()\n with open(\"stat_align.txt\") as f:\n first_line = f.readline()\n first_line = first_line.rstrip()\n first_line = re.split(':|\\t', first_line)\n reference_sequence_name = str(first_line[0])\n allbam_mapped_reads = int(first_line[2])\n # Duplicate bam stats\n duplicate_stat_file = \"duplicate_stat_align.txt\"\n duplicate_stat_out = open(duplicate_stat_file, 'w')\n #os.system(\"samtools idxstats {} > {}\" .format(sortedbam, stat_out)) Doesn't work when needing to std out.\n duplicate_stat_out.write(os.popen(\"samtools idxstats {} \" .format(nodupbam)).read())\n duplicate_stat_out.close()\n with open(duplicate_stat_file) as f:\n for line_num, line in enumerate(f):\n if line_num == 1:\n dup_line_two = line\n dup_line_two = dup_line_two.split()\n unmapped_reads = int(dup_line_two[3])\n try:\n print(f\"{unmapped_reads} unmapped reads\")\n except:\n unmapped_reads = \"none_found\"\n logging.debug(\"Zero unmapped reads\")\n print(f\"no unmapped reads\")\n allbam_mapped_reads = \"{:,}\".format(allbam_mapped_reads)\n\n try:\n zero_coverage_vcf, good_snp_count, ave_coverage, genome_coverage = add_zero_coverage(sample_name, sample_reference, nodupbam, hapall, zero_coverage_vcf)\n except FileNotFoundError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name, \"ALIGNMENT ERROR, NO COVERAGE FILE\")\n text = \"ALIGNMENT ERROR, NO COVERAGE FILE \" + sample_name\n msg = MIMEMultipart()\n msg['From'] = \"[email protected]\"\n msg['To'] = \"[email protected]\"\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = \"### No coverage file\"\n msg.attach(MIMEText(text))\n smtp = smtplib.SMTP('10.10.8.12')\n smtp.send_message(msg)\n smtp.quit()\n\n if arg_options[\"gbk_file\"] or arg_options['no_annotation']:\n annotated_vcf = loc_sam + \"-annotated.vcf\"\n gbk_file = arg_options['gbk_file']\n\n print(\"Putting gbk into indexed dataframe...\")\n annotation_dict = {}\n for gbk in gbk_file:\n print(\"gbk file: %s\" % gbk)\n write_out = open('temp.csv', 'w+')\n gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk, \"genbank\"))\n gbk_chrome = list(gbk_dict.keys())[0]\n for key, value in gbk_dict.items():\n for feature in value.features:\n if \"CDS\" in feature.type or \"rRNA\" in feature.type:\n myproduct = None\n mylocus = None\n mygene = None\n try:\n myproduct = feature.qualifiers['product'][0]\n except KeyError:\n pass\n try:\n mylocus = feature.qualifiers['locus_tag'][0]\n except KeyError:\n pass\n try:\n mygene = feature.qualifiers['gene'][0]\n except KeyError:\n pass\n print(key, int(feature.location.start), int(feature.location.end), mylocus, myproduct, mygene, sep='\\t', file=write_out)\n write_out.close()\n df = pd.read_csv('temp.csv', sep='\\t', names=[\"chrom\", \"start\", \"stop\", \"locus\", \"product\", \"gene\"])\n df = df.sort_values(['start', 'gene'], ascending=[True, False])\n df = df.drop_duplicates('start')\n pro = df.reset_index(drop=True)\n pro.index = pd.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')\n annotation_dict[gbk_chrome] = pro\n\n header_out = open('v_header.csv', 'w+')\n with 
open(zero_coverage_vcf) as fff:\n for line in fff:\n if re.search('^#', line):\n print(line.strip(), file=header_out)\n header_out.close()\n\n vcf_df = pd.read_csv(zero_coverage_vcf, sep='\\t', header=None, names=[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\", \"Sample\"], comment='#')\n vcf_df['ABS_VALUE'] = vcf_df['CHROM'].map(str) + '-' + vcf_df['POS'].map(str)\n vcf_df = vcf_df.set_index('ABS_VALUE')\n\n annotate_condense_dict = {}\n for gbk_chrome, pro in annotation_dict.items():\n matching_chrom_df = vcf_df[vcf_df['CHROM'] == gbk_chrome]\n for index, row in matching_chrom_df.iterrows():\n pos = row.POS\n try:\n aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]\n try:\n chrom, name, locus, tag = aaa.values[0]\n annotate_condense_dict[str(chrom) + \"-\" + str(pos)] = \"{}, {}, {}\".format(name, locus, tag)\n except ValueError:\n # if only one annotation entire chromosome (such with flu) then having [0] fails\n chrom, name, locus, tag = aaa.values\n annotate_condense_dict[str(chrom) + \"-\" + str(pos)] = \"{}, {}, {}\".format(name, locus, tag)\n except KeyError:\n annotate_condense_dict[str(gbk_chrome) + \"-\" + str(pos)] = \"No annotated product\"\n\n annotate_df = pd.DataFrame.from_dict(annotate_condense_dict, orient='index', columns=[\"ID\"])\n annotate_df.index.name = 'ABS_VALUE'\n vcf_df.drop(['ID'], axis=1, inplace=True)\n\n vcf_df = vcf_df.merge(annotate_df, how='left', left_index=True, right_index=True)\n vcf_df = vcf_df[[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\", \"Sample\"]]\n vcf_df.to_csv('v_annotated_body.csv', sep='\\t', header=False, index=False)\n\n cat_files = ['v_header.csv', 'v_annotated_body.csv']\n with open(annotated_vcf, \"wb\") as outfile:\n for cf in cat_files:\n with open(cf, \"rb\") as infile:\n outfile.write(infile.read())\n try:\n os.remove('temp.csv')\n os.remove('v_header.csv')\n os.remove('v_annotated_body.csv')\n except FileNotFoundError as ex:\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name)\n pass\n os.remove(samfile)\n os.remove(bamfile)\n os.remove(unmapsam)\n os.remove(sortedbam)\n os.remove(sortedbam + \".bai\")\n os.remove(unfiltered_hapall)\n os.remove(sample_reference + \".amb\")\n os.remove(sample_reference + \".ann\")\n os.remove(sample_reference + \".bwt\")\n os.remove(sample_reference + \".pac\")\n os.remove(sample_reference + \".sa\")\n os.remove(ref + \".dict\")\n os.remove(duplicate_stat_file)\n os.remove(\"stat_align.txt\")\n\n unmapped = working_directory + \"/unmapped\"\n os.makedirs(unmapped)\n\n newZip = zipfile.ZipFile(unmapped_read1gz, 'w')\n newZip.write(unmapped_read1, compress_type=zipfile.ZIP_DEFLATED)\n newZip.close()\n os.remove(unmapped_read1)\n if paired:\n newZip = zipfile.ZipFile(unmapped_read2gz, 'w')\n newZip.write(unmapped_read2, compress_type=zipfile.ZIP_DEFLATED)\n newZip.close()\n os.remove(unmapped_read2)\n\n try:\n shutil.move(unmapped_read1gz, unmapped)\n if paired:\n shutil.move(unmapped_read2gz, unmapped)\n shutil.move(abyss_out, unmapped)\n except FileNotFoundError as ex:\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name)\n pass\n alignment = working_directory + \"/alignment\"\n os.makedirs(alignment)\n movefiles = glob.glob('*-*')\n for i in movefiles:\n shutil.move(i, alignment)\n try:\n shutil.move(sample_reference, alignment)\n shutil.move(sample_reference + \".fai\", alignment)\n except shutil.Error:\n pass\n except 
FileNotFoundError:\n pass\n except FileExistsError:\n pass\n\n runtime = (datetime.now() - startTime)\n print(\"\\n\\nruntime: %s: \\n\" % runtime)\n ave_coverage = \"{:0.1f}\".format(float(ave_coverage))\n print(\"average_coverage: %s\" % ave_coverage)\n\n R1size = sizeof_fmt(os.path.getsize(R1))\n if paired:\n R2size = sizeof_fmt(os.path.getsize(R2))\n\n try:\n with open(\"mlst/mlst.txt\") as f:\n first_line = f.readline()\n mlst_type = first_line.rstrip()\n first_line = first_line.split()\n mlst_type = first_line[1:]\n mlst_type = '-'.join(mlst_type)\n except FileNotFoundError:\n mlst_type = \"N/A\"\n\n try:\n with open(\"spoligo.txt\") as f:\n first_line = f.readline()\n first_line = first_line.rstrip()\n first_line = first_line.split()\n octalcode = first_line[0]\n sbcode = first_line[1]\n hexcode = first_line[2]\n binarycode = first_line[3]\n except FileNotFoundError:\n octalcode = \"N/A\"\n sbcode = \"N/A\"\n hexcode = \"N/A\"\n binarycode = \"N/A\"\n #Capture program versions for step 1\n try:\n verison_out = open(\"version_capture.txt\", 'w')\n print(os.popen('conda list bwa | grep -v \"^#\"; \\\n conda list abyss | grep -v \"^#\"; \\\n conda list picard | grep -v \"^#\"; \\\n conda list samtools | grep -v \"^#\"; \\\n conda list freebayes | grep -v \"^#\"; \\\n conda list biopython | grep -v \"^#\"').read(), file=verison_out)\n print(\"Dependent source: {}\" .format(arg_options['script_dependents']), file=verison_out)\n verison_out.close()\n except:\n logging.debug(\"failed version capture\")\n pass\n\n sequence_count = 0\n total_length = 0\n with gzip.open(R1, \"rt\") as handle:\n for r in SeqIO.parse(handle, \"fastq\"):\n total_length = total_length + len(r.seq)\n sequence_count = sequence_count + 1\n ave_read_length = total_length / sequence_count\n ave_read_length = \"{:0.1f}\".format(float(ave_read_length))\n\n ts = time.time()\n st = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')\n\n stat_summary = {}\n stat_summary[\"time_stamp\"] = st\n stat_summary[\"sample_name\"] = sample_name\n stat_summary[\"species\"] = arg_options[\"species\"]\n stat_summary[\"reference_sequence_name\"] = reference_sequence_name\n stat_summary[\"R1size\"] = R1size\n if paired:\n stat_summary[\"R2size\"] = R2size\n else:\n stat_summary[\"R2size\"] = None\n stat_summary[\"allbam_mapped_reads\"] = allbam_mapped_reads\n stat_summary[\"genome_coverage\"] = genome_coverage\n stat_summary[\"ave_coverage\"] = ave_coverage\n stat_summary[\"ave_read_length\"] = ave_read_length\n stat_summary[\"unmapped_reads\"] = unmapped_reads\n stat_summary[\"unmapped_assembled_contigs\"] = abyss_contig_count\n stat_summary[\"good_snp_count\"] = good_snp_count\n stat_summary[\"mlst_type\"] = mlst_type\n stat_summary[\"octalcode\"] = octalcode\n stat_summary[\"sbcode\"] = sbcode\n stat_summary[\"hexadecimal_code\"] = hexcode\n stat_summary[\"binarycode\"] = binarycode\n ###\n # Create a sample stats file in the sample's script1 directory\n summary_file = loc_sam + \"_\" + st + '.xlsx'\n workbook = xlsxwriter.Workbook(summary_file)\n worksheet = workbook.add_worksheet()\n row = 0\n col = 0\n\n top_row_header = [\"time_stamp\", \"sample_name\", \"species\", \"reference_sequence_name\", \"R1size\", \"R2size\", \"Q_ave_R1\", \"Q_ave_R2\", \"Q30_R1\", \"Q30_R2\", \"allbam_mapped_reads\", \"genome_coverage\", \"ave_coverage\", \"ave_read_length\", \"unmapped_reads\", \"unmapped_assembled_contigs\", \"good_snp_count\", \"mlst_type\", \"octalcode\", \"sbcode\", \"hexadecimal_code\", \"binarycode\"]\n for header in 
top_row_header:\n worksheet.write(row, col, header)\n col += 1\n # worksheet.write(row, col, v)\n try:\n stat_summary.update(arg_options['read_quality_stats'])\n except KeyError as ex:\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), sample_name)\n pass\n worksheet.write(1, 0, stat_summary.get('time_stamp', 'n/a'))\n worksheet.write(1, 1, stat_summary.get('sample_name', 'n/a'))\n worksheet.write(1, 2, stat_summary.get('species', 'n/a'))\n worksheet.write(1, 3, stat_summary.get('reference_sequence_name', 'n/a'))\n worksheet.write(1, 4, stat_summary.get('R1size', 'n/a'))\n worksheet.write(1, 5, stat_summary.get('R2size', 'n/a'))\n worksheet.write(1, 6, stat_summary.get('Q_ave_R1', 'n/a'))\n worksheet.write(1, 7, stat_summary.get('Q_ave_R2', 'n/a'))\n worksheet.write(1, 8, stat_summary.get('Q30_R1', 'n/a'))\n worksheet.write(1, 9, stat_summary.get('Q30_R2', 'n/a'))\n worksheet.write(1, 10, stat_summary.get('allbam_mapped_reads', 'n/a'))\n worksheet.write(1, 11, stat_summary.get('genome_coverage', 'n/a'))\n worksheet.write(1, 12, stat_summary.get('ave_coverage', 'n/a'))\n worksheet.write(1, 13, stat_summary.get('ave_read_length', 'n/a'))\n worksheet.write(1, 14, stat_summary.get('unmapped_reads', 'n/a'))\n worksheet.write(1, 15, stat_summary.get('unmapped_assembled_contigs', 'n/a'))\n worksheet.write(1, 16, stat_summary.get('good_snp_count', 'n/a'))\n worksheet.write(1, 17, stat_summary.get('mlst_type', 'n/a'))\n worksheet.write(1, 18, stat_summary.get('octalcode', 'n/a'))\n worksheet.write(1, 19, stat_summary.get('sbcode', 'n/a'))\n worksheet.write(1, 20, stat_summary.get('hexadecimal_code', 'n/a'))\n worksheet.write(1, 21, stat_summary.get('binarycode', 'n/a'))\n workbook.close()\n return(stat_summary)\n\n\ndef sizeof_fmt(num, suffix='B'):\n for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n\ndef get_annotations(line, in_annotation_as_dict):\n #pos_found = False\n line = line.rstrip()\n if line.startswith(\"#\"): # save headers to file\n return(line)\n elif not line.startswith(\"#\"): # position rows\n #pos_found = False\n split_line = line.split('\\t')\n chrom = split_line[0]\n position = split_line[1] # get position\n #print(\"Getting annotations\")\n for each_key, each_value in in_annotation_as_dict.items():\n pos_found = False\n if chrom == each_key:\n for feature in each_value.features:\n position = int(position)\n #print(position)\n if position in feature and (\"CDS\" in feature.type or \"rRNA\" in feature.type):\n myproduct = \"none list\"\n mylocus = \"none list\"\n mygene = \"none list\"\n for p in feature.qualifiers['product']:\n myproduct = p\n for l in feature.qualifiers['locus_tag']:\n mylocus = l\n if \"gene\" in feature.qualifiers:\n gene = feature.qualifiers['gene']\n for g in gene:\n mygene = g\n annotation_found = myproduct + \", gene: \" + mygene + \", locus_tag: \" + mylocus\n pos_found = True\n if not pos_found:\n annotation_found = \"No annotated product\"\n #print(annotation_found)\n split_line[2] = annotation_found\n annotated_line = \"\\t\".join(split_line)\n return(annotated_line)\n\n\ndef mlst(arg_options):\n\n if arg_options['debug_call']:\n with open(\"mlst-arg_options.json\", 'w') as outfile:\n json.dump(arg_options, outfile)\n\n # with open(\"mlst-arg_options.json\") as infile:\n # arg_options = json.load(infile)\n\n sample_directory = str(os.getcwd())\n R1 = arg_options['R1']\n R2 = arg_options['R2']\n 
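# Descriptive note on the MLST steps below: a concatenated ST1-MLST reference is written to ST1-MLST.fasta,\n # reads are mapped to it with bwa mem, variants are called with freebayes, the base calls at the 71 positions in\n # target_pos_ref are joined into one string (mlst_join), and that string is looked up in mlst_dictionary to report the MLST type.\n 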
sample_name = arg_options['sample_name']\n\n #https://bmcmicrobiol.biomedcentral.com/articles/10.1186/1471-2180-7-34\n write_ref = open(\"ST1-MLST.fasta\", 'w')\n print(\">ST1-MLST\", file=write_ref)\n print(\"CGTTTCCCCAAGGAAGTGGAGGTTGCAGGCGATACGATCGATGTTGGCTACGGCCCGATCAAGGTTCATGCCGTCCGCAACCCGGCCGAACTGCCGTGGAAGGAAGAAAACGTCGATATCGCCCTTGAATGCACCGGCATTTTCACCTCGCGCGACAAGGCAGCACTTCATCTTGAAGCTGGCGCCAAGCGCGTCATCGTCTCCGCTCCCGCAGACGGTGCCGATCTCACCGTCGTCTATGGTGTCAACAACGACAAGCTGACGAAGGACCATCTGGTCATCTCCAACGCTTCGTGTACCACCAACTGCCTTGCGCCGGTGGCTCAGGTTCTCAACGATACTATCGGTATCGAAAAGGGCTTCATGACCACGATCCACTCCTATACGGGCGACCAGCCGACGCTGGACACCATGCACAAGGATCTCTACCGCGCCCGCGCCGCTGCCCTTTCCATGATCCCGACCTCGACGGGTGCGGCCAAGGCCGTCGGTCTCGTTCTGCCGGAACTGAAAGGCAAGCTCGACGGCGTTGCCATTCGCGTCCCGACCCCAAATGTCTCGGTCGTTGATCTCACCTTCATCGCCAAGCGTGAAACCACCGTTGAAGAAGTCAACAATGCGATCCGCGAAGCCGCCAATGGCCGCCTCAAGGGCATTCTCGGCTATACCGATGAGAAGCTCGTCTCGCACGACTTCAACCACGATTCCCATTCCTCGGTCTTCCACACCGACCAGACCAAGGTTATGGACGGCACCATGGTGCGTATCCTGTCGTGGTACGACAATGAATGGGGCTTCTCCAGCCGCATGAGCGACACCGCCGTCGCTTTGGGCAAGCTGATCTGATAACGGCAACGCCTCTCCTTCACTGGCGAGGCGTTTTCATTTCTTGATAAGGACCGAGAGAAGAAACATGATGTTCCGCACCCTTGACGATGCCAATGTCCAATCCAAGCGCGTGCTGGTCCGTGTTGACCTCAACGTGCCGAAAATCCGCCGTTCTGCTCGCTGGTCTTAACACCCCGGGCGTCACCACCGTGATCGAGCCGGTCATGACGCGCGATCATACGGAAAAGATGCTGCAAGACTTTGGCGCAGACCTGACGGTTGAAACCGATAAGGATGGTGTGCGCCATATCCGTATTGTCGGCCAGGGCAAGCTTACCGGCCAGACCATCGACGTGCCGGGTGATCCCTCGTCAACGGCTTTTCCGCTGGTGGCCGCCCTTCTGGTCGAAGGTTCGGAGGTCACCATCCGCAATGTGCTGATGAACCCGACCCGCACCGGCCTGATCCTGACGTTGCAGGAAATGGGGGCGGATATCGAGATCATCGATCCACGCCTTGCCGGCGGCGAGGATGTCGCCGATCTGCGCGTCAAGGCCTCGAAGCTGAAAGGCGTTGTCGTTCCGCCGGAACGTGCGCCTTCGATGATCGATGAATATCCGGTTCTGGCCATTGCCGCGTCTTTTGCGGAAGGCGAAACCGTGATGGACGGTCTCGATGAACTGCGCGTCAAGGAATCGGATCGTCTGGCGGCCGTTGCGCGCGGCCTTGAAGCCAATGGTGTCGATTGTACCGAAGGCGAGATGTCGCTGACGGTTCGTGGCCGCCCCGGCGGCAAGGGGCTGGGCGGTGGCACGGTTGCAACCCACCTCGACCACCGCATCGCGATGAGTTTCCTCGTCATGGGCCTTGCATCGGAAAAGCCGGTTACGGTGGATGACAGCACCATGATCGCCACCTCTTTCCCGGAATTCATGGGCATGATGGCGGGGCTGGGGGCGAAGATTGCCGAAAGCGGTGCAGAATGAAATCGTTCGTCGTCGCCCCGTTCATTGTCGCCATTGACGGACCGGCCGCCTCGGGCAAGGGAACCCTTGCCCGGCGGATCGCGACACATTACGGGATGCCGCATCTCGATACGGGCCTGACCTATCGCGCGGTCGCCAAGAGCCGCGCTCTGTCATTCTGGCCGTGGCAGGCCCGGTGGACGGCGACGAGATCGACCTCACCAATTGCGACTGGGTCGTGCGTCCTAAAAAGATGATCGCTGATCTGGGCTTTGAAGACGTGACCGTCCTCAATGATTTCGAGGCGCAGGCCCTTGCCGTGGTTTCGCTGGAAGGCCACCATATGGAACAGATCGGCGGCAAACCGGAGGAGGCTGTTGCCACCCGCGTCGTGCTCGGCCCCGGCACGGGCCTTGGCGTGGCAGGTCTGTTTCGCACACGTCATGCATGGGTTCCGGTTCCCGGTGAAGGCGGTCATATCGATATCGGTCCACGCACCGAACGCGACTACCAGATTTTCCCGCATATCGAACGCATCGAAGGGCGTGTCACCGGCGAGCAAATTCTTAGCGGGCGGGGCCTGCGCAACCTCTATCTGGGCATCTGCGCCGCCGACAAGATCACGCCCACCCTTGAGACGCCAGTAGACATTACATCCGCCGGACTGGACGGCAGCAATCCACAAGCCGCAGAAACGCTTGACCTCTTCGCCACCTATCTGGGGCGGCTTGCGGGCGACCTTGCGCTCATTTTCATGGCGCATGGCGGCGTTTATCTTTCGGGTGGCATCCCGGTGCGCATCCTTTCCGCCCTCAAGGCCGGTTCGTTCCGCGCAACCTTCGAGGACAAGGCCCCGCACAAGGCCATCATGCGCGACATACCGGTCCGCGTTATCACATATCAACTGGCGGCCTTAACCGGGCTTTCCGCTTTCGCCCGCACCCCCTCGCGCTTTGAAGTTTCGACCGAGGGCCGCCGCTGGCGCATGCGCCGCTAGAGCATTTCCGAGCCAAAAGTGCGAAGCGGTTCCGTTTCCCAACGAGCCGACCGCGGCTGCGCTTGCCTATGGTCTCGACAAGAGCGAAGGCAAGACCATCGCTGTCTATGACCTTGGCGGCGGTACTTTCGACGTGTCGGTTCTGGAAATCGGCGACGGCGTTTTTGAAGTGAAGTCCACCAATGGCGACACGTTCCTTGGCGGTGAAGACTTCGATATTCGTCTGGTCGAATATCTGGTTGCCGAGTTCAAGAAGGAAAGTGGCATCGACCTGAAGAACGACAAGCTTGCCCTGCAGCGCCTCAAGGAAGCTGCCGAAAAGGCCAAGATCGAACTGTCGTCCTCGCAGCAGACCGAAATCAACCTGCCGTTCATCACGGCTGACCAGACTGGCCCGAAGCATCTGGCGATCAAGCTGTCGCGCGCCAAGTTTGAAAGCCTGGTCGATGATCTCGTGCAGCGCACGGTCGAGCCGTGCAAGGCGGCGCTCAAGGATGCCGGCCTCAAGGCTGGCGAAATTGACGAAGTGGTTCTGGTCGGCGGCATGACCCGCAT
GCCCAAGATTCAGGAAGTCGTGAAGGCCTTCTTCGGCAAGGAACCGCACAAGGGCGTGAACCCGGATGAAGTCGTGGCCATGGGCGCGGCGATCCAGGGCGGCGTTTTGCAGGGCGACGTCAAGGACGTGCTGCTGCTCGACGTGACCCCGCTTTCGCTCGGCATTGAAACGCTGGGCGGCGTGTTCACCCGCCTGATCGAACGCAACACCACTATCCCGACCAAGAAGTCGCAGACCTTCTCCACGGCTGAGGACAACCAGTCGGCCGTGACGATCCGCGTCTTCCAGGGCGAGCGTGAAATGGCAGCCGATAACAAGCTGCTTGGACAGTTCGACCTCGTTGGCATTCCGCCACGTCCCTGCCCGGAAAGCTTGCCGATTGCCAGGAGCGCGATCCGGCCAAGTCCGAAATCTTCATCGTCGAGGGCGATTCGGCAGGCGGTTCCGCCAAGAGCGGGCGCTCGCGCCAGAATCAGGCCATTCTGCCGCTGCGCGGCAAAATCCTGAACGTGGAACGCGTGCGTTTCGACCGGATGATTTCATCCGATCAGGTGGGCACCCTCATCACGGCGCTTGGCACCTCCATCGGCAAGGATGAAACGCACGGCTTCAACGCCGACAAGCTGCGTTATCACAAGATCATCATCATGACCGACGCCGACGTCGATGGCGCCCATATTCGTACGCTTCTGCTCACCTTCTTCTTCCGGCAGATGCCGGAACTGATCGAACGCGGGCATATCTATATCGCGCAGCCGCCGCTCTATAAGGTGACACGCGGCAAGTCTTCGCAATATATCAAGAACGAAGCCGCCTTTGAGGATTTCCTCATCGAAACCGGCCTTGAAGAAACGACACTGGAACTGGTGACTGGCGAAATGCGCGCCGGGCCGGATTTGCGCTCGGTGGTGGAGGATGCGCGCACGCTGCGTCAGCTTCTGCACGGCCTGCACACCCGCTATGACCGCAGCGTGGTGGAACAGGCGGCAATTGCCGGCCTGCTCAACCCCGATGCCTCAAGGGACAATGCAACGGCACAGCATTCCGCCGATACGGTTGCCAAGCGTCTCGACATGATTTCGGAAGAGACCGAGCGCGGCTGGAGCGGCCATGTGATGGAAGACGGCGGCTATCGCTTCGAGCGTATGGTGCGCGGTGTAAAGGATATCGCCATTCTCGACATGGCCCTGCTCGGCTCGGCCGATGCCCGCCAGGTCGACCGAGATCGAGATGTATTCCCGCCTGATCCATACGGTCGATCATATCGAAGGCCGCCTGCGTGACGGCATGGATGCGTTTGACGGCTTCCTCAGCCATGCATGGGCTGTGACGGTGACAGGCGCGCCGAAGCTGTGGGCAATGCGCTTTCTTGAGGAAAACGAACGCAGCCCGCGCGCATGGTATGGCGGCGCGATCGGCATGATGCATTTCAATGGCGATATGAATACAGGGCTGACGCTGCGCACCATCCGCATCAAGGATGGTGTGGCGGAAATCCGTGCAGGGGCGACGCTTCTGTTCGATTCCAACCCTGACGAGGAAGAAGCCGAGACCGAATTGAAGGCATCGGCCATGATTGCGGCTGTGCGGGACGCACAGAAGAGCAATCAGATCGCGGAAGAAAGTGTGGCGGCAAAGGTGGGTGAGGGGGTTTCGATCCTGCTGGTCGATCACGAGGATTCCTTCGTCCATACGCTTGCCAATTATTTCCGCCAGACGGGCGCCAAGGTTTCCACCGTGCGTTCACCGGTGGCAGAGGAGATATTCGACCGCGTCAATCCCGATCTGGTGGTGTTATCGCCGGGACCGGGCTCGCCGCAGGATTTCGATTGCAAGGCGACCATCGATAAGGCGCGCAAGCGCCAGCTTCCGATTTTTGGCGTCTGCCTCGGCCTTCAGGCACTGGCGGAAGCCTATGGCGGGGCGTTGCGCCAGCTTCGCGTTCCGGTGCATGGCAAGCCTTCACGCATCCGCGTATCAAAGCCGGAGCGCATTTTCTCCGGCTTGCCGGAGGAAGTGACGGTGGGGCGTTATCATTCGATCTTCGCCGATCCTGAACGCCTGCCGGATGATTTTCTCGTCACAGCCGAAACGGAAGACGGGATCATAGCCTGCGGTGGAGGTGGTGATGGTGCCGCCGGGCTCCAGCCTGCCTGCGGATGCGGGGCTTGTCGTGTTGCCCGGCACCAAATCCACGATTGCCGATCTGCTGGCGCTGCGTGAAAACGGCTGGGACCGCGAATTGGTCGCCCATGTGAAGCGGGGCGGGCATGTGCTTGGTATTTGCGGCGGGTTTCAAATGCTTGGACGGCGGATCAGTGACCCGGCGGGTATTGAAGGCAATGTGCGCGATATCGAGGGGCTGGGCCTTCTCGATATCGAGACGATGACGGAGCCGGAAAAAGTGGTTCGCAATGTTGAGGCGGTGTCGCTGCTGCATGATGAGCCGCTGGAGGGCTATGAAATCCACATCGGGCGCACCAGCGGGCCGGATATGGCGCGGCCATTTGCGCGTATCGGCGATCATGATGATGGGGCCGTCTCGCCCGATGGTCGTATCATGGGAACCTATCTCCACGGTATTTTCAGTGCGGATCGTTTCCGCCACCACTTTTTGCGCGCGCTGGGTGTGGAAGGCGGCCAGATGAATTATCGCGAGAGCGTCGAAGAGGCTCTGGGCGAACTGGCTGAAGGGCTGGAAGCCTCGCTGGATATTGATGGCCTGTTTGCGCTGGCATGATTGACGCCGCGAAGCCGAAAGCCTAGTGTCAAACCATGTGACAGGTTTTGCCGGAACGAATCCCCGGCAATACCAAAAGGGAATGCGACGGACGGACCCACGCCGGGCGTCTTTATCGCAGCCGACCCCGCGACTGTAGAGCGGAGAGGGAAGAGGCAAGCCGGGCAACCGGCAGCCACTGGAAATCAGATGCGATAATGCAACATCGCATTTTTGCCATCTTCTCGACAGATTATCTCCACACAATGGGGCATTTCGTGCCGCAATTACCCTCGATATGTCACCCCTGTCAGCGCGGCATGGGCGGTTTACTCCCGATGCTGCCCGCCCGATAAGGGACCGCGCAAAACGTAATTTGTGTAAGGAGAATGCCATGCGCACTCTTAAGTCTCTCGTAATCGTCTCGGCTGCGCTGCTGCCGTTCTCTGCGACCGCTTTTGCTGCCGACGCCATCCAGGAACAGCCTCCGGTTCCGGCTCCGGTTGAAGTAGCTCCCCAGTATAGCTGGGCTGGTGGCTATACCGGTCTTTACCTTGGCTATGGCTGGAACAAGGCCAAGACCAGCACCGTTGGCAGCATCAAGCCTGACGATTGGAAGGCTGGCGCCTTTGCTGGCTGGAACTTCCAGCAGGACCAGATCGTATACGGTGTTGAAGGTGATGCAGGTTATTCCTGGGCCAAGAAGTCCAAGGACGGCCTGGAAGTCAAGCAGGGCTTTGAAGGCTCGCTGCGTGCCCGCGTCGGCTACGACCTGAACCCGGTTATGCCGTACCTCACGGCTGGTATTGCCGGTTCGCAGATCAAGCTTAACAAC
GGCTTGGACGACGAAAGCAAGTTCCGCGTGGGTTGGACGGCTGGTGCCGGTCTCGAAGCCAAGCTGACGGACAACATCCTCGGCCGCGTTGAGTACCGTTACACCCAGTACGGCAACAAGAACTATGATCTGGCCGGTACGACTGTTCGCAACAAGCTGGACACGCAGGATATCCGCGTCGGCATCGGCTACAAGTTCTAATTATAGCATAATTGGACACGGAAAACCGGACAGGCAACTGTCCGGTTTTTTGTTGTCTGCAAAGGTCGAGAAAGCGCGGCAGAGCAACGGCGGCAGCCTGATTTTCAGGGGAAATGAAGTGGAGGCTTCTGTTGCCAGGTGCCTCCGAACCCCGCCTTAAGGGGCTAACCCTAAGGACTTTAGAGTGGGTTTCCCGCACCGCCATTAGGCAGCGAGAGCATAACCCTGAGCATTGTTGTCATTTGCAACTACTCTGTTGACCCGATAACGGTGGTATCATGCCGAGTAAAAGAGCGATCTTTACACCCTTGTCGATCCTGTTTCGCCCCCGCCACAACACAGCCTGATCGGCAAGCTGTGCTGTGGTGGAGGCGCCGGGTACCGCCCCCGGGTCCAATGGGTTTATTACACCGTCCGTTTATCACCATAGTCGGCTTGCGCCGACAGGACGTATATAGGCGTGGTTTTTACCGATTGGAAGGGGGCTTGTGCGTTTTCGCGCAAGACCGACAGAGGTGGTGCGGCCCTTCCGTTCATTTTCCATTGACAGCTTCCGCGTGCTGGTCAATCCTCACAATATATCGGGATCGGCCTTGAAGAGGCTTGGCGCAGCCGGGGCGGAAACCATGGCTGAAACGGGGACGATATGCCCCAATCGAAGGAGAGTGGATATATGAGTGAATATCTCGCGGATGTCCGTCGCTATGATGCTGCCGCCGATGAGGCCGTTGTCGAGAAAATCGTCAAGCATCTTGGCATTGCGCTTCGCAATCGCGATTCCTCGCTCGTTTCGGCAAGC\", file=write_ref)\n write_ref.close()\n\n directory = str(os.getcwd())\n print(directory)\n sample_reference_mlst_location = directory + \"/ST1-MLST.fasta\"\n sample_name_mlst = sample_name + \"-mlst\"\n print(\"mlst reference: %s\" % sample_reference_mlst_location)\n ref = re.sub('\\.fasta', '', os.path.basename(sample_reference_mlst_location))\n print(ref)\n\n loc_sam_mlst = directory + \"/\" + sample_name_mlst\n print(\"\\n--\")\n print(\"sample name mlst: %s\" % sample_name_mlst)\n print(\"sample reference mlst: %s\" % sample_reference_mlst_location)\n print(\"ref, no ext: %s \" % ref)\n print(\"Read 1: %s\" % R1)\n print(\"Read 2: %s\\n\" % R2)\n print(\"directory: %s\" % directory)\n print(\"loc_sam_mlst: %s\" % loc_sam_mlst)\n print(\"--\\n\")\n\n os.system(\"samtools faidx {}\" .format(sample_reference_mlst_location))\n os.system(\"picard CreateSequenceDictionary REFERENCE={} OUTPUT={}\" .format(sample_reference_mlst_location, directory + \"/\" + ref + \".dict\"))\n os.system(\"bwa index {}\" .format(sample_reference_mlst_location))\n print(\"\\n@@@ BWA mem\")\n samfile_mlst = loc_sam_mlst + \".sam\"\n os.system(\"bwa mem -M -t 16 {} {} {} > {}\" .format(sample_reference_mlst_location, R1, R2, samfile_mlst))\n print(\"\\nAdd read group and out all BAM\")\n all_bam_mlst = loc_sam_mlst + \"-all.bam\"\n os.system(\"picard AddOrReplaceReadGroups INPUT={} OUTPUT={} RGLB=lib1 RGPU=unit1 RGSM={} RGPL=illumina\" .format(samfile_mlst, all_bam_mlst, sample_name_mlst))\n\n print(\"\\n@@@ Samtools mapped\")\n mapbam = loc_sam_mlst + \"-unmapped.bam\"\n os.system(\"samtools view -h -F4 -b -T {} {} -o {}\" .format(sample_reference_mlst_location, all_bam_mlst, mapbam))\n\n print(\"\\n@@@ Sort BAM\")\n sortedbam = loc_sam_mlst + \"-sorted.bam\"\n os.system(\"samtools sort {} -o {}\" .format(mapbam, sortedbam))\n\n print(\"\\n@@@ Index BAM\")\n os.system(\"samtools index {}\" .format(sortedbam))\n\n print(\"\\n@@@ Calling SNPs with freebayes\")\n unfiltered_vcf_mlst = directory + \"/\" + sample_name + \"-unfiltered_mlst\" + \".vcf\"\n mapq_fix = loc_sam_mlst + \"-mapq_fix_mlst.vcf\"\n vcf_mlst = directory + \"/\" + sample_name + \"_mlst\" + \".vcf\"\n\n os.system(\"freebayes -E -1 -f {} {} > {}\" .format(sample_reference_mlst_location, sortedbam, unfiltered_vcf_mlst))\n # \"fix\" MQ notation in VCF to match GATK output\n write_fix = open(mapq_fix, 'w+')\n with open(unfiltered_vcf_mlst, 'r') as unfiltered:\n for line in unfiltered:\n line = line.strip()\n new_line = re.sub(r';MQM=', r';MQ=', line)\n print(new_line, 
file=write_fix)\n write_fix.close()\n # remove clearly poor positions\n os.system(r'vcffilter -f \"QUAL > 20\" %s > %s' % (mapq_fix, vcf_mlst))\n\n pos_call_dict = {}\n vcf_reader = vcf.Reader(open(vcf_mlst, 'r'))\n for record in vcf_reader:\n if record.ALT[0]:\n pos_call_dict.update({record.POS: str(record.ALT[0])})\n\n # Position 1629 was too close to the end of glk sequence. Reads would not assemble properly to call possilbe SNP, therefore 100 bases of the gene were added. Because of this all positions beyond this point are 100 more. Same with position 1645 and 2693.\n\n target_pos_ref = {231: 'C', 297: 'T', 363: 'C', 398: 'C', 429: 'C', 523: 'G', 631: 'G', 730: 'G', 1247: 'G', 1296: 'C', 1342: 'G', 1381: 'A', 1648: 'C', 1685: 'C', 1741: 'C', 1754: 'G', 2165: 'A', 2224: 'T', 2227: 'C', 2297: 'G', 2300: 'A', 2344: 'A', 2352: 'G', 2403: 'C', 2530: 'G', 2557: 'G', 2578: 'G', 2629: 'A', 3045: 'A', 3054: 'G', 3118: 'G', 3295: 'C', 3328: 'C', 3388: 'A', 3966: 'C', 3969: 'G', 4167: 'G', 4271: 'C', 4296: 'G', 4893: 'C', 4996: 'G', 4998: 'T', 5058: 'G', 5248: 'A', 5672: 'G', 5737: 'C', 5928: 'A', 5963: 'G', 5984: 'C', 5987: 'C', 6025: 'G', 6045: 'G', 6498: 'G', 6499: 'C', 6572: 'A', 6627: 'T', 6715: 'C', 6735: 'T', 6745: 'G', 6785: 'T', 6810: 'C', 6828: 'C', 6845: 'C', 6864: 'G', 6875: 'C', 7382: 'G', 7432: 'G', 7464: 'G', 7594: 'G', 7660: 'T', 7756: 'A'}\n\n #pos_call_dict will replace target_pos_ref\n for key, value in pos_call_dict.items():\n if key in target_pos_ref.keys():\n target_pos_ref[key] = value\n ordered_combined_dict = OrderedDict(sorted(target_pos_ref.items()))\n combined_value_list = list(ordered_combined_dict.values())\n mlst_join = ''.join(combined_value_list)\n\n mlst_dictionary = {}\n mlst_dictionary[\"CTCCCGGGGCGACCCGATCGAAGCGGGAAGGCCACGGCGCGTGAGCAGCCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 01\"\n mlst_dictionary[\"CTCCCGGGGCGACCCGAGCGAAGCGGGAAGGCCACGGCGCGTGAGCAGCCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 02\"\n mlst_dictionary[\"CTCCCGTGGCGACCCGAGCGAAGCGGGAAGGCCACGGCGCGTGAGCAGCCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 03\"\n mlst_dictionary[\"CTCCCGGGGCGACCCGAGCGAAGCGGGAAGGCCAAGGCGCGTGAGCAGCCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 04\"\n mlst_dictionary[\"CTCCCGGGGCGACCCGATCGAAGCGGGAAGGCCACGGCGAGTGAGCAGCCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 05\"\n mlst_dictionary[\"TTCCTGGGGCAACCCGAGCGAGGCAGGGAGGCCGCGGCTCGTGAGCGGTCGGGCATCTGTCCCGCGGGGTA\"] = \"MLST type 06\"\n mlst_dictionary[\"CTTCCTGGCCGAGCCGAGTGAAGGGGGGAGGCCACGGCGCGTGCTCGGCTGGGTACCTGTCTCGCGGTGCT\"] = \"MLST type 07\"\n mlst_dictionary[\"CTTCCTGGCCGACCCGAGTGAAGGGGGGAGGCCACGGCGCGTGCGCGGCTGGGTACCCGTCTCGCGGTGCT\"] = \"MLST type 08\"\n mlst_dictionary[\"CTTCCTGGCCGACCCGAGTGAAGGGGGGAGGCCACGGCGCGTGCGCGGCTGGGTACCTGTCTCGTGGTGCT\"] = \"MLST type 09\"\n mlst_dictionary[\"CTTCCTGGCCGACCCGAGTGAAGGGGGGGGGCCACGGCGCGTGCTCGGCTGGGTACCTGTCTCGCGGTGCT\"] = \"MLST type 10\"\n mlst_dictionary[\"CTTCCTGGCCGACCCGAGTGAAGGGGGGAGGCCACGGCGCGTGCGCGGCTGGGTACCTGTCTCGCGGTGCT\"] = \"MLST type 11\"\n mlst_dictionary[\"CTTCCTGGCCGACCCGAGTGAAGGGGGGAGGCCACGGCGCGTGCTCGGCTGGGTACCTGTCTCGCGGTGCT\"] = \"MLST type 12\"\n mlst_dictionary[\"CCCCCGGGCCGACTCGAGCGAAGCGAAGAGGCCACGGCGCGTGAGTGACCAGGCACCTATCCCACGGGGTA\"] = \"MLST type 13\"\n mlst_dictionary[\"CCCCCGGGCCGGCCCAAGCGAAGCGGGGAGGCTACAGTGCGTGAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 14\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGGGCGAAGCGGGGAGGCTACGGTGCGTGAGTGGCCAGGCACCTGTCCCGCGAGGTA\"] = \"MLST type 15\"\n mlst_dictionary[\"CCCCCGGCCCGACCCGGGCGAAGCGGGGAGGCTACGGTGCGTGAGTGGCCAGGCACCTGTCCCGCGAGGTA\"] = \"MLST type 16\"\n 
mlst_dictionary[\"CCCCCGGGCCGGCCCAAGCGAAGCGGGGAGGCTACAATGCGTGAGTGGCCAGGCACCTGTCCCGCAGGGTA\"] = \"MLST type 17\"\n mlst_dictionary[\"CCCCCGGGCCGGCCCAAGCGAAGCGGGGAGGCTACAATGCGTGAGTGGCCAGGCACCTGTCCCGCAGGCTA\"] = \"MLST type 18\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCGAAGCGGGGAGGACACGGCGCGTGAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 19\"\n mlst_dictionary[\"CCCCCGGGCCGGCCCAAGCGAAGCGGGGAGGCTACAATGCGTGAGTGGCCAGGCACATGTCCCGCAGGGTA\"] = \"MLST type 20\"\n mlst_dictionary[\"CCCCCGGGCCGGCCCAAGCGAAGCGGGGAGGCTACAATGCGTGAGTGGCCAGGCACATGCCCCGCAGGGTA\"] = \"MLST type 21\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCGAGGCGGGGAGGCCACGGCGCGGGAGTGGCCAGACACCTGTCCTGCGGGGTA\"] = \"MLST type 22\"\n mlst_dictionary[\"CCCCCGGGCTGACCCGAGCGAAACGGGGAAGCCACGGCGCGTAAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 23\"\n mlst_dictionary[\"CCCCCGGGCTGACCCGAGCGGAACGGGGAAGCCACGGCGCGTAAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 23x\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCAAAGCGGGGAGGCCACGGCGCGTAAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 24\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCGAAGCGGGGAGGCCACGGCGCGTAAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 25\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCGAAGCGGGGAGGCCACGGCGCGTAAGTGGCCAAGCACCTGTTCCGCGGGGTA\"] = \"MLST type 26\"\n mlst_dictionary[\"CCCCCGGGCCGACCCGAGCGAAGCGGGGAGACCACGGCGCATAAGTGGCCAGGCACCTGTCCCGCGGGGTA\"] = \"MLST type 27\"\n mlst_dictionary[\"CCCTCGGGCCGACCTGAGCGAAGCGGGGAGACCACGGCGCATAAGTGGCCAGGCTCCTGTCCCGCGGGGTA\"] = \"MLST type 28\"\n\n remove_files = glob.glob('ST1-MLST*')\n for i in remove_files:\n os.remove(i)\n remove_files = glob.glob('*-mlst*')\n for i in remove_files:\n os.remove(i)\n remove_files = glob.glob('*_mlst.vcf.idx')\n for i in remove_files:\n os.remove(i)\n os.remove(unfiltered_vcf_mlst)\n\n write_out = open(\"mlst.txt\", 'w')\n if mlst_join in mlst_dictionary:\n mlst_type = mlst_dictionary[mlst_join]\n print(mlst_type)\n print(mlst_type, file=write_out)\n else:\n print(\"NO MLST MATCH FOUND\\n\")\n print(\"NO MLST MATCH FOUND\", file=write_out)\n write_out.close()\n\n os.makedirs(\"mlst\")\n shutil.move(vcf_mlst, \"mlst\")\n shutil.move(\"mlst.txt\", \"mlst\")\n os.chdir(sample_directory)\n\n\ndef finding_sp(v):\n total = 0\n total_finds = 0\n #if total < 6: # doesn't make a big different. Might as well get full counts\n #total += sum(seq.count(x) for x in (v)) #v=list of for and rev spacer\n total_finds = [len(regex.findall(\"(\" + spacer + \"){s<=1}\", seq_string)) for spacer in v]\n for number in total_finds:\n total += number\n return(total)\n\n\ndef binary_to_octal(binary):\n #binary_len = len(binary)\n i = 0\n ie = 1\n octal = \"\"\n while ie < 43:\n ie = i + 3\n print(binary[i:ie])\n region = binary[i:ie]\n region_len = len(region)\n i += 3\n if int(region[0]) == 1:\n if region_len < 2: # for the lone spacer 43. 
When present needs to be 1 not 4.\n oct = 1\n else:\n oct = 4\n else:\n oct = 0\n try:\n if int(region[1]) == 1:\n oct += 2\n if int(region[2]) == 1:\n oct += 1\n except IndexError:\n pass\n octal = octal + str(oct)\n return(octal)\n\n\ndef binary_to_hex(binary):\n section1 = binary[0:7]\n section2 = binary[7:14]\n section3 = binary[14:21]\n section4 = binary[21:28]\n section5 = binary[28:36]\n section6 = binary[36:43]\n\n hex_section1 = hex(int(section1, 2))\n hex_section2 = hex(int(section2, 2))\n hex_section3 = hex(int(section3, 2))\n hex_section4 = hex(int(section4, 2))\n hex_section5 = hex(int(section5, 2))\n hex_section6 = hex(int(section6, 2))\n\n return(hex_section1.replace('0x', '').upper() + \"-\" + hex_section2.replace('0x', '').upper() + \"-\" + hex_section3.replace('0x', '').upper() + \"-\" + hex_section4.replace('0x', '').upper() + \"-\" + hex_section5.replace('0x', '').upper() + \"-\" + hex_section6.replace('0x', '').upper())\n\n\ndef spoligo(arg_options):\n sample_directory = str(os.getcwd())\n R1 = arg_options['R1']\n R2 = arg_options['R2']\n print(\"\\nFinding spoligotype pattern...\\n\")\n '''spoligo spacers'''\n spoligo_dictionary = {}\n spoligo_dictionary[\"spacer01\"] = [\"TGATCCAGAGCCGGCGACCCTCTAT\", \"ATAGAGGGTCGCCGGCTCTGGATCA\"]\n spoligo_dictionary[\"spacer02\"] = [\"CAAAAGCTGTCGCCCAAGCATGAGG\", \"CCTCATGCTTGGGCGACAGCTTTTG\"]\n spoligo_dictionary[\"spacer03\"] = [\"CCGTGCTTCCAGTGATCGCCTTCTA\", \"TAGAAGGCGATCACTGGAAGCACGG\"]\n spoligo_dictionary[\"spacer04\"] = [\"ACGTCATACGCCGACCAATCATCAG\", \"CTGATGATTGGTCGGCGTATGACGT\"]\n spoligo_dictionary[\"spacer05\"] = [\"TTTTCTGACCACTTGTGCGGGATTA\", \"TAATCCCGCACAAGTGGTCAGAAAA\"]\n spoligo_dictionary[\"spacer06\"] = [\"CGTCGTCATTTCCGGCTTCAATTTC\", \"GAAATTGAAGCCGGAAATGACGACG\"]\n spoligo_dictionary[\"spacer07\"] = [\"GAGGAGAGCGAGTACTCGGGGCTGC\", \"GCAGCCCCGAGTACTCGCTCTCCTC\"]\n spoligo_dictionary[\"spacer08\"] = [\"CGTGAAACCGCCCCCAGCCTCGCCG\", \"CGGCGAGGCTGGGGGCGGTTTCACG\"]\n spoligo_dictionary[\"spacer09\"] = [\"ACTCGGAATCCCATGTGCTGACAGC\", \"GCTGTCAGCACATGGGATTCCGAGT\"]\n spoligo_dictionary[\"spacer10\"] = [\"TCGACACCCGCTCTAGTTGACTTCC\", \"GGAAGTCAACTAGAGCGGGTGTCGA\"]\n spoligo_dictionary[\"spacer11\"] = [\"GTGAGCAACGGCGGCGGCAACCTGG\", \"CCAGGTTGCCGCCGCCGTTGCTCAC\"]\n spoligo_dictionary[\"spacer12\"] = [\"ATATCTGCTGCCCGCCCGGGGAGAT\", \"ATCTCCCCGGGCGGGCAGCAGATAT\"]\n spoligo_dictionary[\"spacer13\"] = [\"GACCATCATTGCCATTCCCTCTCCC\", \"GGGAGAGGGAATGGCAATGATGGTC\"]\n spoligo_dictionary[\"spacer14\"] = [\"GGTGTGATGCGGATGGTCGGCTCGG\", \"CCGAGCCGACCATCCGCATCACACC\"]\n spoligo_dictionary[\"spacer15\"] = [\"CTTGAATAACGCGCAGTGAATTTCG\", \"CGAAATTCACTGCGCGTTATTCAAG\"]\n spoligo_dictionary[\"spacer16\"] = [\"CGAGTTCCCGTCAGCGTCGTAAATC\", \"GATTTACGACGCTGACGGGAACTCG\"]\n spoligo_dictionary[\"spacer17\"] = [\"GCGCCGGCCCGCGCGGATGACTCCG\", \"CGGAGTCATCCGCGCGGGCCGGCGC\"]\n spoligo_dictionary[\"spacer18\"] = [\"CATGGACCCGGGCGAGCTGCAGATG\", \"CATCTGCAGCTCGCCCGGGTCCATG\"]\n spoligo_dictionary[\"spacer19\"] = [\"TAACTGGCTTGGCGCTGATCCTGGT\", \"ACCAGGATCAGCGCCAAGCCAGTTA\"]\n spoligo_dictionary[\"spacer20\"] = [\"TTGACCTCGCCAGGAGAGAAGATCA\", \"TGATCTTCTCTCCTGGCGAGGTCAA\"]\n spoligo_dictionary[\"spacer21\"] = [\"TCGATGTCGATGTCCCAATCGTCGA\", \"TCGACGATTGGGACATCGACATCGA\"]\n spoligo_dictionary[\"spacer22\"] = [\"ACCGCAGACGGCACGATTGAGACAA\", \"TTGTCTCAATCGTGCCGTCTGCGGT\"]\n spoligo_dictionary[\"spacer23\"] = [\"AGCATCGCTGATGCGGTCCAGCTCG\", \"CGAGCTGGACCGCATCAGCGATGCT\"]\n spoligo_dictionary[\"spacer24\"] = [\"CCGCCTGCTGGGTGAGACGTGCTCG\", 
\"CGAGCACGTCTCACCCAGCAGGCGG\"]\n spoligo_dictionary[\"spacer25\"] = [\"GATCAGCGACCACCGCACCCTGTCA\", \"TGACAGGGTGCGGTGGTCGCTGATC\"]\n spoligo_dictionary[\"spacer26\"] = [\"CTTCAGCACCACCATCATCCGGCGC\", \"GCGCCGGATGATGGTGGTGCTGAAG\"]\n spoligo_dictionary[\"spacer27\"] = [\"GGATTCGTGATCTCTTCCCGCGGAT\", \"ATCCGCGGGAAGAGATCACGAATCC\"]\n spoligo_dictionary[\"spacer28\"] = [\"TGCCCCGGCGTTTAGCGATCACAAC\", \"GTTGTGATCGCTAAACGCCGGGGCA\"]\n spoligo_dictionary[\"spacer29\"] = [\"AAATACAGGCTCCACGACACGACCA\", \"TGGTCGTGTCGTGGAGCCTGTATTT\"]\n spoligo_dictionary[\"spacer30\"] = [\"GGTTGCCCCGCGCCCTTTTCCAGCC\", \"GGCTGGAAAAGGGCGCGGGGCAACC\"]\n spoligo_dictionary[\"spacer31\"] = [\"TCAGACAGGTTCGCGTCGATCAAGT\", \"ACTTGATCGACGCGAACCTGTCTGA\"]\n spoligo_dictionary[\"spacer32\"] = [\"GACCAAATAGGTATCGGCGTGTTCA\", \"TGAACACGCCGATACCTATTTGGTC\"]\n spoligo_dictionary[\"spacer33\"] = [\"GACATGACGGCGGTGCCGCACTTGA\", \"TCAAGTGCGGCACCGCCGTCATGTC\"]\n spoligo_dictionary[\"spacer34\"] = [\"AAGTCACCTCGCCCACACCGTCGAA\", \"TTCGACGGTGTGGGCGAGGTGACTT\"]\n spoligo_dictionary[\"spacer35\"] = [\"TCCGTACGCTCGAAACGCTTCCAAC\", \"GTTGGAAGCGTTTCGAGCGTACGGA\"]\n spoligo_dictionary[\"spacer36\"] = [\"CGAAATCCAGCACCACATCCGCAGC\", \"GCTGCGGATGTGGTGCTGGATTTCG\"]\n spoligo_dictionary[\"spacer37\"] = [\"CGCGAACTCGTCCACAGTCCCCCTT\", \"AAGGGGGACTGTGGACGAGTTCGCG\"]\n spoligo_dictionary[\"spacer38\"] = [\"CGTGGATGGCGGATGCGTTGTGCGC\", \"GCGCACAACGCATCCGCCATCCACG\"]\n spoligo_dictionary[\"spacer39\"] = [\"GACGATGGCCAGTAAATCGGCGTGG\", \"CCACGCCGATTTACTGGCCATCGTC\"]\n spoligo_dictionary[\"spacer40\"] = [\"CGCCATCTGTGCCTCATACAGGTCC\", \"GGACCTGTATGAGGCACAGATGGCG\"]\n spoligo_dictionary[\"spacer41\"] = [\"GGAGCTTTCCGGCTTCTATCAGGTA\", \"TACCTGATAGAAGCCGGAAAGCTCC\"]\n spoligo_dictionary[\"spacer42\"] = [\"ATGGTGGGACATGGACGAGCGCGAC\", \"GTCGCGCTCGTCCATGTCCCACCAT\"]\n spoligo_dictionary[\"spacer43\"] = [\"CGCAGAATCGCACCGGGTGCGGGAG\", \"CTCCCGCACCCGGTGCGATTCTGCG\"]\n\n count_summary = {}\n\n global seq_string\n sequence_list = []\n try:\n for fastq in R1, R2:\n with gzip.open(fastq, \"rt\") as in_handle:\n # all 3, title and seq and qual, were needed\n for title, seq, qual in FastqGeneralIterator(in_handle):\n sequence_list.append(seq)\n except TypeError:\n # TypeError if not paired\n pass\n\n if len(seq) > 99:\n #Three 10bp sequences dispersed across repeat region, forward and reverse\n capture_spacer_sequence = re.compile(\".*TTTCCGTCCC.*|.*GGGACGGAAA.*|.*TCTCGGGGTT.*|.*AACCCCGAGA.*|.*TGGGTCTGAC.*|.*GTCAGACCCA.*\")\n sequence_list = list(filter(capture_spacer_sequence.match, sequence_list))\n seq_string = \"\".join(sequence_list)\n else:\n #if < 100 then search all reads, not just those with repeat regions.\n seq_string = \"\".join(sequence_list)\n\n # for spacer_id, spacer_sequence in spoligo_dictionary.items():\n # count = finding_sp(spacer_sequence)\n # count_summary.update({spacer_id: count})\n # count_summary = OrderedDict(sorted(count_summary.items()))\n # print(\"count_summary {}\" .format(count_summary))\n\n for spacer_id, spacer_sequence in spoligo_dictionary.items():\n count = delayed(finding_sp)(spacer_sequence)\n count_summary.update({spacer_id: count})\n pull = delayed(count_summary)\n count_summary = pull.compute()\n count_summary = OrderedDict(sorted(count_summary.items()))\n print(\"count_summary {}\".format(count_summary))\n\n seq_string = \"\"\n\n spoligo_binary_dictionary = {}\n for k, v in count_summary.items():\n if v > 4:\n spoligo_binary_dictionary.update({k: 1})\n else:\n spoligo_binary_dictionary.update({k: 0})\n 
spoligo_binary_dictionary = OrderedDict(sorted(spoligo_binary_dictionary.items()))\n spoligo_binary_list = []\n for v in spoligo_binary_dictionary.values():\n spoligo_binary_list.append(v)\n bovis_string = ''.join(str(e) for e in spoligo_binary_list) #bovis_string correct\n hexadecimal = binary_to_hex(bovis_string)\n write_out = open(\"spoligo.txt\", 'w')\n found = False\n with open(arg_options[\"spoligo_db\"]) as f: # put into dictionary or list\n for line in f:\n line = line.rstrip()\n octalcode = line.split()[0] #no arg splits on whitespace\n sbcode = line.split()[1]\n binarycode = line.split()[2]\n if bovis_string == '0000000000000000000000000000000000000000000':\n found = True\n octalcode = \"spoligo not found\"\n sbcode = \"spoligo not found\"\n hexadecimal = \"SB2277 ???\"\n binarycode = \"0000000000000000000000000000000000000000000\"\n print(\"CHECK SAMPLE! NO SPACERS FOUND. LIKELY NOT TB COMPLEX. ALTHOUGH SB2277 IS A ZERO STRING BINARY\\n\")\n print(\"CHECK SAMPLE! NO SPACERS FOUND. LIKELY NOT TB COMPLEX. ALTHOUGH SB2277 IS A ZERO STRING BINARY\", file=write_out)\n print(\"\\nOne mismatch allowed spacer search against both R1 and R2 reads.\\n\", file=write_out)\n for k, v in count_summary.items():\n print(k, v, file=write_out)\n break # result does not depend on the db line; avoid repeating this block for every spoligo_db entry\n elif bovis_string == binarycode:\n found = True\n print(\"Pattern found:\")\n print(\"%s %s %s %s\" % (octalcode, sbcode, hexadecimal, binarycode))\n print(\"%s %s %s %s\" % (octalcode, sbcode, hexadecimal, binarycode), file=write_out)\n print(\"\\nOne mismatch allowed spacer search against both R1 and R2 reads.\\n\", file=write_out)\n for k, v in count_summary.items():\n print(k, v, file=write_out)\n\n print(\"bovis_string: %s\" % bovis_string, file=write_out)\n print(\"binarycode : %s\" % binarycode, file=write_out)\n break # match found; no need to scan the rest of the db\n\n if not found:\n octal = binary_to_octal(bovis_string)\n sbcode = \"N/A\"\n print(\"%s %s %s %s\" % (octal, sbcode, hexadecimal, bovis_string))\n print(\"%s %s %s %s\" % (octal, sbcode, hexadecimal, bovis_string), file=write_out)\n print(\"SPOLIGO SB NUMBER NOT FOUND\\n\")\n print(\"\\nSPOLIGO SB NUMBER NOT FOUND\\n\", file=write_out)\n print(\"\\nOne mismatch allowed spacer search against both R1 and R2 reads.\\n\", file=write_out)\n for k, v in count_summary.items():\n print(k, v, file=write_out)\n\n write_out.close()\n os.chdir(sample_directory)\n\n\ndef add_zero_coverage(sample_name, sample_reference, nodupbam, hapall, zero_coverage_vcf):\n print(\"\\n@@@ Depth of coverage using pysam: {}\" .format(sample_name))\n coverage_dict = {}\n coverage_list = pysam.depth(nodupbam, split_lines=True)\n for line in coverage_list:\n chrom, position, depth = line.split('\\t')\n coverage_dict[chrom + \"-\" + position] = depth\n coverage_df = pd.DataFrame.from_dict(coverage_dict, orient='index', columns=[\"depth\"])\n zero_dict = {}\n for record in SeqIO.parse(sample_reference, \"fasta\"):\n chrom = record.id\n total_len = len(record.seq)\n for pos in list(range(1, total_len + 1)):\n zero_dict[str(chrom) + \"-\" + str(pos)] = 0\n zero_df = pd.DataFrame.from_dict(zero_dict, orient='index', columns=[\"depth\"])\n #df with depth_x and depth_y columns, depth_y index is NaN\n coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')\n #depth_x \"0\" column no longer needed\n coverage_df = coverage_df.drop(columns=['depth_x'])\n coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})\n #convert the NaN to 0 coverage\n coverage_df = coverage_df.fillna(0)\n coverage_df['depth'] = coverage_df['depth'].apply(int)\n\n 
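# The zero-coverage handling works as follows: zero_dict enumerates every reference position at depth 0, the outer merge\n # keeps positions missing from the pysam depth output, and the NaN-to-0 fill makes them explicit. This frame is then used\n # to compute genome_coverage and to emit placeholder VCF records (REF 'N', sample './.') for zero-coverage positions.\n 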
print(\"...coverage found\")\n total_length = len(coverage_df)\n ave_coverage = coverage_df['depth'].mean()\n\n zero_df = coverage_df[coverage_df['depth'] == 0]\n total_zero_coverage = len(zero_df)\n print(\"Total zero coverage positions: {:,}\" .format(total_zero_coverage))\n total_coverage = total_length - total_zero_coverage\n genome_coverage = \"{:.2%}\".format(total_coverage / total_length)\n\n vcf_df = pd.read_csv(hapall, sep='\\t', header=None, names=[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\", \"Sample\"], comment='#')\n good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])\n\n if total_zero_coverage > 0:\n header_out = open('v_header.csv', 'w+')\n with open(hapall) as fff:\n for line in fff:\n if re.search('^#', line):\n print(line.strip(), file=header_out)\n header_out.close()\n vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]\n vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]\n vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + '-' + vcf_df_snp['POS'].map(str)\n vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')\n cat_df = pd.concat([vcf_df_snp, zero_df], axis=1, sort=False)\n cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])\n cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')\n cat_df['REF'] = cat_df['REF'].fillna('N')\n cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')\n cat_df['Sample'] = cat_df['Sample'].fillna('./.')\n cat_df['temp'] = cat_df.index.str.split('-')\n cat_df[['CHROM', 'POS']] = pd.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)\n cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]\n cat_df['POS'] = cat_df['POS'].astype(int)\n cat_df = cat_df.sort_values(['CHROM', 'POS'])\n cat_df.to_csv('v_annotated_body.csv', sep='\\t', header=False, index=False)\n cat_files = ['v_header.csv', 'v_annotated_body.csv']\n with open(zero_coverage_vcf, \"wb\") as outfile:\n for cf in cat_files:\n with open(cf, \"rb\") as infile:\n outfile.write(infile.read())\n else:\n shutil.copyfile(hapall, zero_coverage_vcf)\n return (zero_coverage_vcf, good_snp_count, ave_coverage, genome_coverage)\n\n\ndef send_email_step1(email_list, runtime, path_found, summary_file, st):\n text = \"See attached: \"\n send_from = \"[email protected]\"\n send_to = email_list\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Date'] = formatdate(localtime=True)\n if not path_found:\n msg['Subject'] = \"###CUMULATIVE STATS NOT UPDATED - Script1 stats summary\"\n else:\n msg['Subject'] = \"Script1 stats summary, runtime: {}\" .format(runtime)\n msg.attach(MIMEText(text))\n\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(summary_file, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"stat_summary_{}.xlsx\"' .format(st))\n msg.attach(part)\n\n #context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)\n #SSL connection only working on Python 3+\n smtp = smtplib.SMTP('10.10.8.12')\n\n smtp.send_message(msg)\n #smtp.sendmail(send_from, send_to, msg.as_string())\n smtp.quit()\n\n\ndef group_files(each_vcf, arg_options):\n\n mal = \"\"\n list_pass = []\n list_amb = []\n dict_amb = {}\n group_calls = []\n\n try:\n vcf_reader = vcf.Reader(open(each_vcf, 'r'))\n # PUT VCF NAME INTO LIST, capturing for htmlfile\n group_calls.append(each_vcf)\n # for each single vcf getting 
passing position\n for record in vcf_reader:\n try:\n # Freebayes VCFs place MQ values into a list; GATK stores a float\n record.INFO['MQ'] = record.INFO['MQ'][0]\n except TypeError:\n pass\n except KeyError:\n pass\n chrom = record.CHROM\n position = record.POS\n absolute_positon = str(chrom) + \"-\" + str(position)\n # find quality SNPs and put absolute positions into list\n try:\n record_alt_length = len(record.ALT[0])\n except TypeError:\n record_alt_length = 0\n try:\n record_ref_length = len(record.REF)\n except TypeError:\n record_ref_length = 0\n try:\n if str(record.ALT[0]) != \"None\" and record_ref_length == 1 and record_alt_length == 1 and record.INFO['AC'][0] == 2 and record.QUAL > arg_options['qual_threshold'] and record.INFO['MQ'] > 45:\n list_pass.append(absolute_positon)\n # capture ambiguous defining SNPs in htmlfile\n elif str(record.ALT[0]) != \"None\" and record.INFO['AC'][0] == 1:\n list_amb.append(absolute_positon)\n except ZeroDivisionError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf, absolute_positon)\n print(\"bad line in %s at %s\" % (each_vcf, absolute_positon))\n\n for key in arg_options['inverted_position'].keys():\n if key not in list_pass:\n print(\"key %s not in list_pass\" % key)\n directory = arg_options['inverted_position'][key]\n print(\"*** INVERTED POSITION FOUND ***: \\t%s\\t\\t%s\" % (each_vcf, directory))\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n shutil.copy(each_vcf, directory)\n # ADD GROUP TO LIST\n group_calls.append(directory)\n\n #if passing:\n # if a passing position is in the defining SNPs\n defining_snps = arg_options['defining_snps']\n for passing_position in list_pass:\n # normal grouping\n if passing_position in defining_snps:\n directory = defining_snps[passing_position]\n print(\"PASSING POSITION FOUND: \\t%s\\t\\t%s\" % (each_vcf, directory))\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n shutil.copy(each_vcf, directory)\n # ADD GROUP TO LIST\n group_calls.append(directory)\n # find mixed isolates if defining snp is ambiguous\n for amb_position in list_amb:\n if amb_position in defining_snps:\n directory = defining_snps[amb_position]\n dict_amb.update({each_vcf + \"\\t\" + directory: amb_position})\n # ADD AMBIGUOUS CALL TO LIST\n group_calls.append(\"*\" + directory + \"-mix\")\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n shutil.copy(each_vcf, directory)\n # if -a or -e (non elites already deleted from the analysis) copy all vcfs to All_VCFs\n if arg_options['all_vcf'] or arg_options['elite']:\n if not os.path.exists(\"All_VCFs\"):\n os.makedirs(\"All_VCFs\")\n shutil.move(each_vcf, \"All_VCFs\")\n else:\n try:\n os.remove(each_vcf)\n except FileNotFoundError:\n pass\n #print(dict_amb, group_calls, malformed)\n\n except ZeroDivisionError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"ZeroDivisionError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except ValueError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"ValueError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except UnboundLocalError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = 
\"UnboundLocalError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except TypeError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"TypeError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except SyntaxError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"SyntaxError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except KeyError as ex:\n os.remove(each_vcf)\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"KeyError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n except StopIteration as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"StopIteration: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n except IndexError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), each_vcf)\n mal = \"IndexError: corrupt VCF, removed %s \" % each_vcf\n group_calls.append(\"error\")\n\n the_sample_name = group_calls[0:1]\n list_of_groups = sorted(group_calls[1:]) # order the groups\n for i in list_of_groups:\n the_sample_name.append(i) # a is group_calls\n group_calls = the_sample_name\n return dict_amb, group_calls, mal\n\n\ndef run_script2(arg_options):\n\n # IF AVX2 IS AVAILABE (CHECK WITH `cat /proc/cpuinfo | grep -i \"avx\"`). CREATE A LINK TO: `ln -s path_to_raxmlHPC-PTHREADS-AVX2 raxml. Place \"raxml\" in your path. This will allow \"raxml\" to be found first which will call AVX2 version of RAxML\n try:\n subprocess.call(\"raxml\", stdout=open(os.devnull, 'wb'))\n sys_raxml = \"raxml\"\n #print(\"%s found\" % sys_raxml)\n except OSError:\n print(\"looking for RAxML\")\n try:\n subprocess.call(\"raxmlHPC-PTHREADS\")\n sys_raxml = \"raxmlHPC-PTHREADS\"\n print(\"%s found\" % sys_raxml)\n except OSError:\n try:\n subprocess.call(\"raxmlHPC-SSE3\")\n sys_raxml = \"raxmlHPC-SSE3\"\n print(\"%s found\" % sys_raxml)\n except OSError:\n print(\"looking for RAxML\")\n try:\n subprocess.call(\"raxmlHPC\")\n sys_raxml = \"raxmlHPC\"\n print(\"RAxML found\")\n except OSError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), \"#####RAxML is not in you PATH\")\n sys.exit(0)\n arg_options['sys_raxml'] = sys_raxml\n print(\"\\n\\n----> RAxML found in $PATH as: %s <-----\" % arg_options['sys_raxml'])\n if arg_options['cpu_count'] < 20:\n raxml_cpu = 2\n else:\n raxml_cpu = int(arg_options['cpu_count'] / 10)\n arg_options['raxml_cpu'] = raxml_cpu\n\n all_parameters = Get_Specie_Parameters() # Class of possible parameters\n print(\"Sample will be ran as {}\" .format(arg_options['species']))\n parameters, genotype_codes = all_parameters.choose(arg_options['species'])\n if parameters['qual_threshold'] is None:\n print(\"### See species_selection_step2 function\")\n sys.exit(0)\n arg_options.update(parameters)\n\n htmlfile_name = arg_options['root_dir'] + \"/summary_log.html\"\n arg_options['htmlfile_name'] = htmlfile_name\n htmlfile = open(htmlfile_name, 'at')\n\n startTime = datetime.now()\n print(\"Start time: %s\" % startTime)\n\n # DIRECTORY TEST AND BACKUP\n if getattr(sys, 'frozen', False):\n script_used = os.path.realpath(sys.executable)\n elif __file__:\n script_used = os.path.realpath(__file__)\n\n # make backup\n os.makedirs('starting_files')\n all_starting_files 
= glob.glob('*vcf')\n for i in all_starting_files:\n shutil.copy(i, 'starting_files')\n\n test_duplicate()\n\n print(\"\\ndefiningSNPs: %s \" % arg_options['definingSNPs'])\n print(\"filter_file: %s \" % arg_options['filter_file'])\n print(\"remove_from_analysis: %s \" % arg_options['remove_from_analysis'])\n print(\"step2_upload: %s \\n\" % arg_options['step2_upload'])\n\n if genotype_codes:\n print(\"\\nUpdating VCF file names\")\n arg_options = change_names(arg_options, genotype_codes)\n malformed = arg_options['malformed']\n names_not_changed = arg_options['names_not_changed']\n else:\n print(\"Genotypingcode file unavailable. VCF file names not updated\")\n names_not_changed = glob.glob(\"*.vcf\")\n arg_options['malformed'] = []\n arg_options['names_not_changed'] = []\n\n malformed = arg_options['malformed']\n names_not_changed = arg_options['names_not_changed']\n\n files = glob.glob('*vcf')\n print(\"REMOVING FROM ANALYSIS...\")\n wb = xlrd.open_workbook(arg_options['remove_from_analysis'])\n ws = wb.sheet_by_index(0)\n for each_sample in ws.col_values(0):\n each_sample = str(each_sample)\n each_sample = re.sub(r'(.*?)[._].*', r'\\1', each_sample)\n #print(\"each sample %s\" % each_sample)\n myregex = re.compile(each_sample + '.*') # create regular expression to search for in VCF list\n #print(\"myregex %s\" % myregex)\n for i in files:\n if myregex.search(i):\n print(\"### --> %s removed from the analysis\" % i)\n #print(files)\n #print(\"\\n<h4>### --> %s removed from the analysis</h4>\" % i, file=htmlfile)\n try:\n os.remove(i)\n except FileNotFoundError:\n print(\"FileNotFoundError:\")\n vcf_starting_list = glob.glob(\"*.vcf\")\n\n print(\"CHECKING FOR EMPTY FILES...\")\n for filename in vcf_starting_list:\n if os.stat(filename).st_size == 0:\n print(\"### %s is an empty file and has been deleted\" % filename)\n malformed.append(\"File was empty %s\" % filename)\n os.remove(filename)\n\n all_starting_files = glob.glob('*vcf')\n file_number = len(all_starting_files)\n\n print(\"SORTING FILES...\")\n defining_snps = {}\n inverted_position = {}\n wb = xlrd.open_workbook(arg_options['definingSNPs'])\n ws = wb.sheet_by_index(0)\n\n if arg_options['only_all_vcf']:\n print(\"Only running an All_VCF tree\")\n else:\n print(\"Grouping files...\")\n for rownum in range(ws.nrows):\n position = ws.row_values(rownum)[1:][0]\n grouping = ws.row_values(rownum)[:1][0]\n # inverted positions will NOT be found in the passing positions\n # inverted positions are indicated in Defining SNPs by ending with \"!\"\n if position.endswith('!'):\n position = re.sub('!', '', position)\n inverted_position.update({position: grouping})\n else:\n defining_snps.update({position: grouping})\n files = glob.glob('*vcf')\n\n arg_options['inverted_position'] = inverted_position\n arg_options['defining_snps'] = defining_snps\n\n all_list_amb = {}\n group_calls_list = []\n \n if arg_options['debug_call'] and not arg_options['get']:\n for i in files:\n dict_amb, group_calls, mal = group_files(i, arg_options)\n all_list_amb.update(dict_amb)\n group_calls_list.append(group_calls)\n malformed.append(mal)\n else:\n with futures.ProcessPoolExecutor() as pool:\n for dict_amb, group_calls, mal in pool.map(group_files, files, itertools_repeat(arg_options)):\n all_list_amb.update(dict_amb)\n group_calls_list.append(group_calls) # make list of list\n malformed.append(mal)\n malformed = [x for x in malformed if x] # remove empty sets from listn\n\n print(\"Getting directory list\\n\")\n directory_list = next(os.walk('.'))[1] # get 
list of subdirectories\n directory_list.remove('starting_files')\n\n print(\"Placing positions to filter into dictionary...\")\n filter_dictionary = get_filters(arg_options)\n arg_options['filter_dictionary'] = filter_dictionary\n\n if arg_options['gbk_file'] and not arg_options['no_annotation']:\n print(\"Putting gbk into indexed dataframe...\")\n annotation_dict = {}\n for gbk in arg_options['gbk_file']:\n gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk, \"genbank\"))\n gbk_chrome = list(gbk_dict.keys())[0]\n write_out = open('temp.csv', 'w+')\n for key, value in gbk_dict.items():\n for feature in value.features:\n if \"CDS\" in feature.type or \"rRNA\" in feature.type:\n myproduct = None\n mylocus = None\n mygene = None\n try:\n myproduct = feature.qualifiers['product'][0]\n except KeyError:\n pass\n try:\n mylocus = feature.qualifiers['locus_tag'][0]\n except KeyError:\n pass\n try:\n mygene = feature.qualifiers['gene'][0]\n except KeyError:\n pass\n print(key, int(feature.location.start), int(feature.location.end), mylocus, myproduct, mygene, sep='\\t', file=write_out)\n write_out.close()\n\n df = pd.read_csv('temp.csv', sep='\\t', names=[\"chrom\", \"start\", \"stop\", \"locus\", \"product\", \"gene\"])\n os.remove('temp.csv')\n df = df.sort_values(['start', 'gene'], ascending=[True, False])\n df = df.drop_duplicates('start')\n pro = df.reset_index(drop=True)\n pro.index = pd.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')\n annotation_dict[gbk_chrome] = pro\n arg_options['annotation_dict'] = annotation_dict\n\n samples_in_output = []\n print(\"Getting SNPs in each directory\")\n if arg_options['debug_call']:\n for i in directory_list:\n samples_in_fasta = get_snps(i, arg_options)\n samples_in_output.append(samples_in_fasta)\n else:\n cpu_restriction = int(arg_options['cpu_count'] / 2)\n if cpu_restriction < 1:\n cpu_restriction = 2\n with futures.ProcessPoolExecutor(max_workers=cpu_restriction) as pool:\n for samples_in_fasta in pool.map(get_snps, directory_list, itertools_repeat(arg_options), chunksize=5):\n samples_in_output.append(samples_in_fasta)\n\n arg_options.pop('filter_dictionary', None) # filters no longer need, get rid of them to make arg_option more managable.\n\n flattened_list = [item for sublist in samples_in_output for item in sublist]\n flattened_list = set(flattened_list)\n\n count_flattened_list = len(flattened_list)\n count_vcf_starting_list = len(vcf_starting_list)\n start_end_file_diff_count = count_vcf_starting_list - count_flattened_list\n\n pretext_flattened_list = get_pretext_list(flattened_list)\n pretext_vcf_starting_list = get_pretext_list(vcf_starting_list)\n pretext_vcf_starting_list = set(pretext_vcf_starting_list)\n try:\n pretext_flattened_list.remove('root')\n except ValueError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), \"Defining SNPs needed. 
If there are no defining SNP then rerun using -a option\")\n exit(0)\n difference_start_end_file = pretext_vcf_starting_list.symmetric_difference(pretext_flattened_list)\n difference_start_end_file = list(difference_start_end_file)\n difference_start_end_file.sort()\n\n # Zip dependency files\n dependents_dir = arg_options['root_dir'] + \"/dependents\"\n os.makedirs(dependents_dir)\n shutil.copy(arg_options['definingSNPs'], dependents_dir)\n shutil.copy(arg_options['filter_file'], dependents_dir)\n zip(dependents_dir, dependents_dir)\n shutil.rmtree(dependents_dir)\n\n # remove empty list elements\n arg_options['malformed'] = [x for x in arg_options['malformed'] if x]\n arg_options['names_not_changed'] = [x for x in arg_options['names_not_changed'] if x]\n #############################################\n #MAKE HTML FILE:\n print(\"<html>\\n<head><style> table { font-family: arial, sans-serif; border-collapse: collapse; width: 40%; } td, th { border: 1px solid #dddddd; padding: 4px; text-align: left; font-size: 11px; } </style></head>\\n<body style=\\\"font-size:12px;\\\">\", file=htmlfile)\n print(\"<h2>Script ran using <u>%s</u> variables</h2>\" % arg_options['species'].upper(), file=htmlfile)\n print(\"<h4>There are %s VCFs in this run</h4>\" % file_number, file=htmlfile)\n\n #OPTIONS\n print(f\"Additional options ran: email: {arg_options['email_list']}, filter: {arg_options['filter_finder']}, all_vcf: {arg_options['all_vcf']}, elite: {arg_options['elite']}, no annotation: {arg_options['no_annotation']}, debug: {arg_options['debug_call']}, get: {arg_options['get']}, uploaded: {arg_options['upload']}, ignore filters: {arg_options['ignore_filters']}\", file=htmlfile)\n if arg_options['all_vcf']:\n print(\"\\n<h4>All_VCFs is available</h4>\", file=htmlfile)\n elif arg_options['elite']:\n print(\"\\n<h4>Elite VCF comparison available</h4>\", file=htmlfile)\n\n #TIME\n print(\"\\n<h4>Start time: %s <br>\" % startTime, file=htmlfile)\n print(\"End time: %s <br>\" % datetime.now(), file=htmlfile)\n runtime = (datetime.now() - startTime)\n print(\"Total run time: %s: </h4>\" % runtime, file=htmlfile)\n\n # ERROR LIST\n if len(arg_options['malformed']) < 1:\n print(\"<h2>No corrupt VCF removed</h2>\", file=htmlfile)\n else:\n print(\"\\n<h2>Corrupt VCF removed</h2>\", file=htmlfile)\n for i in arg_options['malformed']:\n print(\"%s <br>\" % i, file=htmlfile)\n print(\"<br>\", file=htmlfile)\n\n # AMBIGIOUS DEFINING SNPS\n if len(all_list_amb) < 1:\n print(\"\\n<h2>No ambiguous defining SNPs</h2>\", file=htmlfile)\n else:\n print(\"\\n<h2>Defining SNPs are ambiguous. 
They may be mixed isolates.</h2>\", file=htmlfile)\n print(\"<table>\", file=htmlfile)\n print(\"<tr align=\\\"left\\\"><th>Sample Name</th><th>Division</th><th>Absolute Position</th><tr>\", file=htmlfile)\n ordered_all_list_amb = OrderedDict(sorted(all_list_amb.items()))\n for k, v in ordered_all_list_amb.items():\n k_split = k.split('\\t')\n print(\"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\" % (k_split[0], k_split[1], v), file=htmlfile)\n print(\"</table>\", file=htmlfile)\n print(\"<br>\", file=htmlfile)\n\n #GROUPING TABLE\n print(\"<h2>Groupings</h2>\", file=htmlfile)\n print(\"<table>\", file=htmlfile)\n print(\"<tr align=\\\"left\\\"><th>Sample Name</th><tr>\", file=htmlfile)\n\n group_calls_list = list(filter(None, group_calls_list))\n try:\n group_calls_list.sort(key=lambda x: x[0]) # sort list of list by first element\n except IndexError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), \"Unable to sort grouping list\")\n pass\n\n for i in group_calls_list:\n print(\"<tr>\", file=htmlfile)\n for x in i:\n print(\"<td>%s</td>\" % x, end='\\t', file=htmlfile)\n print(\"</tr>\", file=htmlfile)\n print(\"</table>\", file=htmlfile)\n\n # REPORT DIFFERENCES BETWEEN STARTING FILES AND ENDING FILES REPRESENTED IN ALIGNMENTS AND TABLES\n if start_end_file_diff_count < 1:\n print(\"\\n<h2>No files dropped from the analysis. Input files are equal to those represented in output.</h2>\", file=htmlfile)\n else:\n print(\"\\n<h2>{} files have been dropped. They either need a group, are mixed without a clear group, or an error occurred.</h2>\" .format(start_end_file_diff_count), file=htmlfile)\n print(\"<table>\", file=htmlfile)\n print(\"<tr align=\\\"left\\\"><th>Sample Name</th><tr>\", file=htmlfile)\n for i in difference_start_end_file:\n print(\"<tr><td>{}</td></tr>\" .format(i), file=htmlfile)\n print(\"</table>\", file=htmlfile)\n print(\"<br>\", file=htmlfile)\n #Capture program versions for step 2\n try:\n print(\"\\n<h2>Program versions:</h2>\", file=htmlfile)\n versions = os.popen('conda list biopython | grep -v \"^#\"; \\\n conda list numpy | egrep -v \"^#|numpydoc\"; \\\n conda list pandas | grep -v \"^#\"; \\\n conda list pysam | grep -v \"^#\"; \\\n conda list pyvcf | grep -v \"^#\"; \\\n conda list xlrd | grep -v \"^#\"; \\\n conda list xlsxwriter | grep -v \"^#\"; \\\n conda list raxml | grep -v \"^#\"').read()\n versions = versions.split('\\n')\n for i in versions:\n print(\"%s<br>\" % i, file=htmlfile)\n except Exception:\n logging.debug(\"Unable to capture versions\")\n pass\n print(\"Dependent source: {}<br>\" .format(arg_options['script_dependents']), file=htmlfile)\n\n #FILES NOT RENAMED\n if names_not_changed:\n print(\"\\n<h2>File names did not get changed:</h2>\", file=htmlfile)\n for i in sorted(names_not_changed):\n print(\"%s<br>\" % i, file=htmlfile)\n\n print(\"</body>\\n</html>\", file=htmlfile)\n #############################################\n os.chdir(arg_options['root_dir'])\n print(\"Zipping files...\")\n zip(\"starting_files\", \"starting_files\") # zip starting files directory\n shutil.rmtree(\"starting_files\")\n\n htmlfile.close()\n\n print(\"\\n\\nruntime: %s: \\n\" % runtime)\n\n if arg_options['email_list'] is None:\n print(\"\\n\\tEmail not sent\")\n elif arg_options['email_list']:\n send_email_step2(arg_options)\n print(\"\\n\\tEmail sent to: {}\" .format(arg_options['email_list']))\n else:\n print(\"\\n\\tEmail not sent\")\n\n if arg_options['upload']:\n print(\"Uploading Samples...\")\n\n def copytree(src, dst, symlinks=False, 
ignore=None): #required to ignore permissions\n try:\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n try:\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n except shutil.Error:\n pass\n except FileNotFoundError:\n print(\"except FileNotFoundError: file not found\")\n\n #upload to bioinfoVCF\n src = arg_options['root_dir']\n try:\n dst = arg_options['step2_upload'] + \"/\" + os.path.basename(os.path.normpath(arg_options['root_dir']))\n print(\"\\n\\t%s is copying to %s\" % (src, dst))\n os.makedirs(dst, exist_ok=True)\n copy_tree(src, dst, preserve_mode=0, preserve_times=0)\n print(\"Samples were uploaded to {}\" .format(dst))\n except TypeError as ex:\n pass\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), \"No place to upload, check parameters\")\n else:\n # logging.debug(\"Samples were not copied or uploaded to additional location\")\n print(\"\\tSamples were not copied or uploaded to additional location\")\n\n print(\"\\n\\tComparisons have been made with no obvious error.\\n\")\n\n\ndef send_email_step2(arg_options):\n htmlfile_name = arg_options['htmlfile_name']\n email_list = arg_options['email_list']\n\n msg = MIMEMultipart()\n msg['From'] = \"[email protected]\"\n msg['To'] = email_list\n msg['Subject'] = \"Script 2 \" + arg_options['species']\n with open(htmlfile_name) as fp:\n msg.attach(MIMEText(fp.read(), 'html'))\n\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(\"summary_log.html\", \"r\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"summary_log.html\"')\n msg.attach(part)\n\n smtp = smtplib.SMTP('10.10.8.12')\n smtp.send_message(msg)\n smtp.quit()\n\n\ndef get_pretext_list(in_list):\n outlist = []\n for i in in_list:\n pretext = re.sub('[_.].*', '', i)\n outlist.append(pretext)\n return outlist\n\n\n# def flatten(l):\n# for el in l:\n# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):\n# yield from flatten(el)\n# else:\n# yield el\n\n\ndef zip(src, dst):\n zf = zipfile.ZipFile(\"%s.zip\" % (dst), \"w\", zipfile.ZIP_DEFLATED)\n abs_src = os.path.abspath(src)\n for dirname, subdirs, files in os.walk(src):\n for filename in files:\n absname = os.path.abspath(os.path.join(dirname, filename))\n arcname = absname[len(abs_src) + 1:]\n zf.write(absname, arcname)\n zf.close()\n\n\ndef test_duplicate():\n dup_list = []\n list_of_files = glob.glob('*vcf')\n for line in list_of_files:\n line = re.sub(r'(.*)[_.].*', r'\\1', line)\n dup_list.append(line)\n # find duplicates in list\n duplicates = [k for k, v in Counter(dup_list).items() if v > 1]\n if len(duplicates) > 0:\n print(\"Duplicates Found: %s \" % duplicates)\n print(\"\\n***Error: Duplicate VCFs\")\n sys.exit(0)\n else:\n pass\n\n\ndef change_names(arg_options, genotype_codes):\n \n names_not_changed = []\n list_of_files = glob.glob('*vcf')\n name_found = False\n for filename in list_of_files:\n each_vcf = filename.replace(\"‐\", \"-\")\n vcf_pretext = re.sub(r'(.*?)[._].*', r'\\1', each_vcf) # ? was needed to make greedy, in my view the regex was searching right to left without it.\n vcf_pretext = vcf_pretext.rstrip()\n #Added '^' because h37 18-2397 was finding bovis 18-011018-2397, 2018-06-19\n myregex = re.compile('^' + vcf_pretext + '_.*') #underscore required to make myregex.search below greedy. so it finds exact match and not all matches. 
ex: 10-01 must match 10-01 not 10-010 also\n name_found = False\n try:\n prename = filename.replace(\".vcf\", \"\")\n foundname = genotype_codes[prename]\n name_found = True\n except KeyError:\n for key, value in genotype_codes.items():\n try:\n if myregex.search(key):\n name_found = True\n foundname = key.strip('_')\n except TypeError:\n pass\n if name_found:\n os.rename(filename, foundname + \".vcf\")\n print(\"Name Changed {} --> {}\" .format(filename, foundname + \".vcf\"))\n else:\n os.rename(filename, each_vcf)\n names_not_changed.append(each_vcf)\n print(\"File NOT Changed: {} --> {}\" .format(filename, each_vcf))\n names_not_changed = set(names_not_changed) # remove duplicates\n arg_options['names_not_changed'] = names_not_changed\n\n if arg_options['elite']:\n list_of_files = []\n list_of_files = glob.glob('*vcf')\n if not os.path.exists(\"temp_hold\"):\n print(\"making temp_hold directory\")\n os.makedirs(\"temp_hold\") # make all_vcfs if none exists\n for each_vcf in list_of_files:\n # Default 1 * 24 * 60 *60\n time_test = time.time() - os.path.getmtime(each_vcf) < (1 * 24 * 60 * 60) # 1day * (24*60*60)sec in day\n print(\"%s each_vcf\" % each_vcf)\n vcf_pretext = re.sub(r'(.*?)[._].*', r'\\1', each_vcf) # ? was needed to make greedy, in my view the regex was searching right to left without it.\n vcf_pretext = vcf_pretext.rstrip()\n myregex = re.compile(vcf_pretext + '.*')\n if time_test:\n print(\"time_test true %s\" % each_vcf)\n shutil.copy(each_vcf, \"temp_hold\")\n else:\n for k, v in genotype_codes.items():\n if myregex.search(k):\n try:\n print(\"##### %s\" % time_test)\n if v == \"Yes\": # if marked yes in column 2 of genotyping codes\n print(\"marked yes %s\" % each_vcf)\n shutil.copy(each_vcf, \"temp_hold\") # if \"Yes\" then moved to temp_hold\n else:\n print(\"file will be discarded %s\" % each_vcf)\n except FileNotFoundError:\n print(\"except FileNotFoundError %s\" % each_vcf)\n os.remove(each_vcf)\n shutil.rmtree('starting_files')\n os.makedirs('starting_files')\n os.renames('temp_hold', 'starting_files')\n list_of_files = glob.glob('starting_files/*vcf')\n file_number = len(list_of_files) # update the file_number to present on summary\n for each_vcf in list_of_files:\n shutil.copy(each_vcf, arg_options['root_dir'])\n print(file_number)\n\n return arg_options\n\n\ndef get_filters(arg_options):\n #get first header to apply all filters to vcf\n worksheet = pd.read_excel(arg_options['filter_file'])\n arg_options[\"first_column_header\"] = worksheet.dtypes.index[0]\n filter_dictionary = defaultdict(list) # key: group_name, values: expanded list\n wb = xlrd.open_workbook(arg_options['filter_file'])\n sheets = wb.sheet_names()\n for sheet in sheets:\n ws = wb.sheet_by_name(sheet)\n for colnum in range(ws.ncols): # for each column in worksheet\n group_name = ws.col_values(colnum)[0] # column header naming file\n mylist = ws.col_values(colnum)[1:] # list of each field in column, minus the header\n mylist = [x for x in mylist if x] # remove blank cells\n for value in mylist:\n value = str(value)\n value = value.replace(sheet + \"-\", '')\n if \"-\" not in value:\n value = int(float(value)) # change str to float to int\n filter_dictionary[group_name].append(str(sheet) + \"-\" + str(value))\n elif \"-\" in value:\n value = value.split(\"-\")\n for position in range(int(value[0]), int(value[1]) + 1):\n filter_dictionary[group_name].append(str(sheet) + \"-\" + str(position))\n return(filter_dictionary)\n\n\ndef get_read_mean(rec):\n mean_q = 
int(mean(rec.letter_annotations['phred_quality']))\n return mean_q\n\n\ndef find_filter_dict(each_vcf):\n dict_qual = {}\n dict_map = {}\n vcf_reader = vcf.Reader(open(each_vcf, 'r'))\n for record in vcf_reader:\n try:\n # Freebayes VCFs place MQ values are placed into a list. GATK as a float\n record.INFO['MQ'] = record.INFO['MQ'][0]\n except TypeError:\n pass\n except KeyError:\n pass\n absolute_positon = str(record.CHROM) + \"-\" + str(record.POS)\n try:\n returned_qual = []\n returned_map = []\n if int(record.QUAL) > 0:\n returned_qual.append(record.QUAL)\n returned_map.append(record.INFO['MQ'])\n dict_qual[absolute_positon] = returned_qual\n dict_map[absolute_positon] = returned_map\n except Exception:\n pass\n return dict_qual, dict_map\n\n\ndef find_positions(filename, arg_options):\n found_positions = {}\n vcf_reader = vcf.Reader(open(filename, 'r'))\n try:\n for record in vcf_reader:\n try:\n # Freebayes VCFs place MQ values are placed into a list. GATK as a float\n record.INFO['MQ'] = record.INFO['MQ'][0]\n except TypeError:\n pass\n except KeyError:\n pass\n chrom = record.CHROM\n position = record.POS\n absolute_positon = str(chrom) + \"-\" + str(position)\n # Usable positins are those that:\n # ADD PARAMETERS HERE TO CHANGE WHAT'S SNP WILL BE USED\n # IF NOT FOUND HERE THE SNP WILL BE IGNORED. WILL NOT BE REPRESENTED. HARD REMOVAL\n # parameters\n # str(record.ALT[0]) != \"None\" --> filter deletions\n # len(record.REF) == 1 --> filter bad ref call with 2 nt present\n # len(record.ALT[0]) == 1 --> filter bad alt call with 2 nt present\n # record.heterozygosity == 0.0 --> filter AC=1, heterozygosity.\n # record.QUAL > 150 --> filter poor quality\n # record.INFO['MQ'] --> filter low map quality\n try:\n if arg_options['species'] == 'flu':\n # use both AC=1 and AC=2 as valid position\n if str(record.ALT[0]) != \"None\" and len(record.REF) == 1 and record.QUAL > arg_options['qual_threshold']:\n found_positions.update({absolute_positon: record.REF})\n else:\n if str(record.ALT[0]) != \"None\" and record.INFO['AC'][0] == 2 and len(record.REF) == 1 and record.QUAL > arg_options['qual_threshold'] and record.INFO['MQ'] > 56:\n found_positions.update({absolute_positon: record.REF})\n except KeyError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), filename, absolute_positon)\n pass\n except ZeroDivisionError as ex:\n pass\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), filename, absolute_positon)\n except ValueError as ex:\n pass\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), filename, absolute_positon)\n except UnboundLocalError as ex:\n pass\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), filename, absolute_positon)\n except TypeError as ex:\n pass\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), filename, absolute_positon)\n return found_positions\n\n\ndef check_label(chromesome_position, label, annotation_dict):\n chromesome, position = chromesome_position.split('-')\n pattern_check_list = []\n for gbk_chrome, pro in annotation_dict.items():\n if gbk_chrome == chromesome:\n try:\n aaa=pro.iloc[pro.index.get_loc(int(position))][['chrom', 'locus', 'product', 'gene']]\n chrom, name, locus, tag = aaa.values[0]\n pattern = re.compile(label, re.IGNORECASE)\n pattern_check = pattern.search(str(chrom) + str(position) + str(name) + str(locus) + str(tag))\n ind_annotation = (f\"{chrom} {position} {name} {locus} {tag}\")\n return pattern_check, ind_annotation\n except KeyError:\n return 
None\n\n\ndef get_snps(directory, arg_options):\n\n time_mark = datetime.fromtimestamp(time.time()).strftime('D%Y%m%d_%H%M')\n\n os.chdir(arg_options['root_dir'] + \"/\" + directory)\n print(\"\\n----------------------------\")\n print(\"\\nworking on: %s \" % directory)\n outdir = str(os.getcwd()) + \"/\"\n\n filter_dictionary = arg_options['filter_dictionary']\n first_column_header = arg_options[\"first_column_header\"]\n\n files = glob.glob('*vcf')\n all_positions = {}\n if arg_options['debug_call'] and not arg_options['get']:\n for i in files:\n found_positions = find_positions(i, arg_options)\n all_positions.update(found_positions)\n else:\n with futures.ProcessPoolExecutor() as pool:\n for found_positions in pool.map(find_positions, files, itertools_repeat(arg_options)):\n all_positions.update(found_positions)\n\n print(\"Directory %s found positions %s\" % (directory, len(all_positions)))\n presize = len(all_positions)\n\n # Filter applied to all positions\n if not arg_options['ignore_filters']: #and not arg_options['label']:\n try:\n for pos in filter_dictionary[first_column_header]: #filter_list\n all_positions.pop(pos, None)\n except KeyError as ex:\n # Allow keyerror if group is not represented in filter worksheet\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), f'Not in filter worksheet: {first_column_header}')\n pass\n\n # Filter applied to group\n if not arg_options['ignore_filters']: #and not arg_options['label']:\n try:\n for pos in filter_dictionary[directory]: #filter_list\n all_positions.pop(pos, None)\n except KeyError as ex:\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), f'Not in filter worksheet: {directory}')\n pass\n\n if arg_options['label']:\n filter_label_list = []\n label = arg_options['label']\n label = label.replace('/', '|')\n label = label.replace('-', '|')\n label = label.replace(' ', '|')\n label = label.replace(',', '|')\n label_filter = open(\"label_filter.txt\", 'w')\n print(f\"Annotation labels filtered: {label}\", file=label_filter)\n for chromesome_position in all_positions:\n try:\n pattern_check, ind_annotation = check_label(chromesome_position, label, arg_options['annotation_dict'])\n except TypeError:\n pass\n if pattern_check:\n print(f\"{chromesome_position} {ind_annotation}\", file=label_filter)\n filter_label_list.append(chromesome_position)\n for chromesome_position in filter_label_list:\n all_positions.pop(chromesome_position, None)\n label_filter.close()\n\n\n print(\"\\nDirectory: {}\" .format(directory))\n print(\"Total positions found: {}\" .format(presize))\n print(\"Possible positions filtered {}\" .format(len(filter_dictionary)))\n print(\"Positions after filtering {}\" .format(len(all_positions)))\n\n if arg_options['filter_finder']:\n #write to files\n positions_to_filter = \"positions_to_filter.txt\"\n positions_to_filter_details = \"positions_to_filter_details.txt\"\n good_snps = \"good_snps_details.txt\"\n write_out_positions = open(positions_to_filter, 'w')\n write_out_details = open(positions_to_filter_details, 'w')\n write_out_good_snps = open(good_snps, 'w')\n\n files = glob.glob('*vcf')\n\n #calculate mean/max qual and map at all possible positions\n dd_qual = {}\n dd_map = {}\n if arg_options['debug_call']:\n for each_vcf in files:\n print(\"working on: %s\" % each_vcf)\n dict_qual, dict_map = find_filter_dict(each_vcf)\n keys = set(dd_qual).union(dict_qual)\n no = []\n #make position (key) and qual/maps list (value)\n dd_qual = dict((k, dd_qual.get(k, no) + dict_qual.get(k, no)) for k in 
keys)\n keys = set(dd_map).union(dict_map)\n no = []\n dd_map = dict((k, dd_map.get(k, no) + dict_map.get(k, no)) for k in keys)\n else:\n with Pool(maxtasksperchild=4) as pool:\n for dict_qual, dict_map in pool.map(find_filter_dict, files, chunksize=8):\n keys = set(dd_qual).union(dict_qual)\n no = []\n dd_qual = dict((k, dd_qual.get(k, no) + dict_qual.get(k, no)) for k in keys)\n keys = set(dd_map).union(dict_map)\n no = []\n dd_map = dict((k, dd_map.get(k, no) + dict_map.get(k, no)) for k in keys)\n\n #dict_qual=dict((k, v) for k, v in dict_qual.items() if v)\n #dict_map=dict((k, v) for k, v in dict_map.items() if v)\n\n ave_qual = {}\n max_qual = {}\n for k, v in dd_qual.items():\n #only use if > 3 positions have been called\n if len(v) > 3:\n ave_qual[k] = np.mean(v)\n max_qual[k] = np.max(v)\n\n #provides dictionary as key -> absolute poisiton, value -> average qual/map\n ave_map = {}\n max_map = {}\n for k, v in dd_map.items():\n if len(v) > 3:\n ave_map[k] = np.mean(v)\n max_map[k] = np.max(v)\n\n # get all possible used positions\n all_maybe_filter = []\n for k in ave_qual.keys():\n all_maybe_filter.append(k)\n for k in max_qual.keys():\n all_maybe_filter.append(k)\n for k in ave_map.keys():\n all_maybe_filter.append(k)\n for k in max_map.keys():\n all_maybe_filter.append(k)\n # remove duplicates\n all_maybe_filter = list(set(all_maybe_filter))\n\n # Removing those already from all positions to filter\n if arg_options['filter_file']:\n for pos in filter_dictionary[first_column_header]: #filter_list\n try:\n all_maybe_filter.remove(pos)\n except ValueError:\n pass\n # Removing those already being filtered for specific group\n try:\n for pos in filter_dictionary[directory]: #filter_list\n try:\n all_maybe_filter.remove(pos)\n except ValueError:\n pass\n except KeyError as ex:\n pass\n # debug_log(ex, inspect.getframeinfo(inspect.currentframe()), f'Not in filter worksheet: {directory}')\n # for each possible posible position check if to filter.\n for absolute_positon in all_maybe_filter:\n ave_qual_value = ave_qual[absolute_positon]\n max_qual_value = max_qual[absolute_positon]\n ave_map_value = ave_map[absolute_positon]\n max_map_value = max_map[absolute_positon]\n print(\"%s, max_qual_value: %s, ave_qual_value: %s, max_map_value: %s, ave_map_value: %s\" % (absolute_positon, max_qual_value, ave_qual_value, max_map_value, ave_map_value))\n if max_qual_value < 1300 and ave_qual_value < 700 or ave_map_value < 56:\n print(\"%s, max_qual_value: %s, ave_qual_value: %s, max_map_value: %s, ave_map_value: %s\" % (absolute_positon, max_qual_value, ave_qual_value, max_map_value, ave_map_value), file=write_out_details)\n print(absolute_positon, file=write_out_positions)\n else:\n print(\"%s, max_qual_value: %s, ave_qual_value: %s, max_map_value: %s, ave_map_value: %s\" % (absolute_positon, max_qual_value, ave_qual_value, max_map_value, ave_map_value), file=write_out_good_snps)\n write_out_positions.close()\n write_out_details.close()\n write_out_good_snps.close()\n\n table_location = outdir + directory + \"-table.txt\"\n table = open(table_location, 'wt')\n\n # write absolute positions to table\n # order before adding to file to match with ordering of individual samples below\n # all_positions is abs_pos:REF\n all_positions = OrderedDict(sorted(all_positions.items()))\n # Add the positions to the table\n print(\"reference_pos\", end=\"\\t\", file=table)\n for k, v in all_positions.items():\n print(k, end=\"\\t\", file=table)\n print(\"\", file=table)\n\n list_of_files = glob.glob('*vcf')\n\n # 
for each vcf\n all_map_qualities = {}\n for file_name in list_of_files:\n sample_map_qualities = {}\n just_name = file_name.replace('.vcf', '')\n just_name = re.sub('\\..*', '*', just_name) # if after the .vcf is removed there is stilll a \".\" in the name it is assumed the name did not get changed\n print(just_name, end=\"\\t\", file=table)\n # for each line in vcf\n vcf_reader = vcf.Reader(open(file_name, 'r'))\n sample_dict = {}\n for record in vcf_reader:\n try:\n # Freebayes VCFs place MQ values are placed into a list. GATK as a float\n record.INFO['MQ'] = record.INFO['MQ'][0]\n except TypeError:\n pass\n except KeyError:\n pass\n record_position = str(record.CHROM) + \"-\" + str(record.POS)\n if record_position in all_positions:\n #print(\"############, %s, %s\" % (file_name, record_position))\n # NOT SURE THIS IS THE BEST PLACE TO CAPTURE MQ AVERAGE\n # MAY BE FASTER AFTER PARSIMONY SNPS ARE DECIDED, BUT THEN IT WILL REQUIRE OPENING THE FILES AGAIN.\n if str(record.ALT[0]) != \"None\" and str(record.INFO['MQ']) != \"nan\": #on rare occassions MQ gets called \"NaN\" thus passing a string when a number is expected when calculating average.\n #print(\"getting map quality: %s %s %s\" % (record.INFO['MQ'], file_name, str(record.POS)))\n sample_map_qualities.update({record_position: record.INFO['MQ']})\n # ADD PARAMETERS HERE TO CHANGE WHAT'S EACH VCF REPRESENTS.\n # SNP IS REPRESENTED IN TABLE, NOW HOW WILL THE VCF REPRESENT THE CALLED POSITION\n # str(record.ALT[0]) != \"None\", which means a deletion as ALT\n # not record.FILTER, or rather PASSED.\n # check record.QUAL\n # In GATK VCFs \"!= None\" not used.\n if str(record.ALT[0]) != \"None\" and len(record.ALT[0]) == 1 and record.INFO['AC'][0] == 2 and record.QUAL > arg_options['N_threshold']:\n sample_dict.update({record_position: record.ALT[0]})\n elif str(record.ALT[0]) != \"None\" and len(record.ALT[0]) == 1 and record.INFO['AC'][0] == 1 and int(record.QUAL) > arg_options['N_threshold']:\n ref_alt = str(record.ALT[0]) + str(record.REF[0])\n if ref_alt == \"AG\":\n sample_dict.update({record_position: \"R\"})\n elif ref_alt == \"CT\":\n sample_dict.update({record_position: \"Y\"})\n elif ref_alt == \"GC\":\n sample_dict.update({record_position: \"S\"})\n elif ref_alt == \"AT\":\n sample_dict.update({record_position: \"W\"})\n elif ref_alt == \"GT\":\n sample_dict.update({record_position: \"K\"})\n elif ref_alt == \"AC\":\n sample_dict.update({record_position: \"M\"})\n elif ref_alt == \"GA\":\n sample_dict.update({record_position: \"R\"})\n elif ref_alt == \"TC\":\n sample_dict.update({record_position: \"Y\"})\n elif ref_alt == \"CG\":\n sample_dict.update({record_position: \"S\"})\n elif ref_alt == \"TA\":\n sample_dict.update({record_position: \"W\"})\n elif ref_alt == \"TG\":\n sample_dict.update({record_position: \"K\"})\n elif ref_alt == \"CA\":\n sample_dict.update({record_position: \"M\"})\n else:\n sample_dict.update({record_position: \"N\"})\n # Poor calls\n elif str(record.ALT[0]) != \"None\" and int(record.QUAL) <= 50:\n sample_dict.update({record_position: record.REF[0]})\n elif str(record.ALT[0]) != \"None\" and int(record.QUAL) <= arg_options['N_threshold']:\n sample_dict.update({record_position: \"N\"})\n elif str(record.ALT[0]) != \"None\": #Insurance -- Will still report on a possible SNP even if missed with above statement\n sample_dict.update({record_position: str(record.REF[0])})\n elif str(record.ALT[0]) == \"None\":\n sample_dict.update({record_position: \"-\"})\n\n # After iterating through VCF combine dict 
to nested dict\n all_map_qualities.update({just_name: sample_map_qualities})\n\n # merge dictionaries and order\n merge_dict = {}\n merge_dict.update(all_positions) #abs_pos:REF\n merge_dict.update(sample_dict) # abs_pos:ALT replacing all_positions, because keys must be unique\n merge_dict = OrderedDict(sorted(merge_dict.items())) #OrderedDict of ('abs_pos', ALT_else_REF), looks like a list of lists\n for k, v in merge_dict.items():\n #print(\"k %s, v %s\" % (k, v))\n print(str(v) + \"\\t\", file=table, end=\"\")\n print(\"\", file=table) # sample printed to file\n table.close() #end of loop. All files done\n\n # Select parsimony informative SNPs\n mytable = pd.read_csv(table_location, sep='\\t')\n # drop NaN rows and columns\n mytable = mytable.dropna(axis=1)\n\n # SELECT PARISOMONY INFORMATIVE SNPSs\n # removes columns where all fields are the same\n parsimony = mytable.loc[:, (mytable != mytable.iloc[0]).any()]\n parsimony_positions = list(parsimony)\n #write over table (table_location) containing all snps\n parsimony.to_csv(table_location, sep=\"\\t\", index=False)\n table = open(table_location, 'a')\n # The reference calls are added after the parsimony positions are selected.\n # added corresponding reference to parsimony table\n print(\"reference_call\", end=\"\\t\", file=table)\n #all_positions_list=list(all_positions)\n try: #if there is only one file in the group exception is needed to return a value\n parsimony_positions.remove('reference_pos')\n except ValueError:\n samples_in_fasta = []\n return(samples_in_fasta)\n\n list_of_ref = []\n for abs_pos in parsimony_positions:\n list_of_ref.append(all_positions.get(abs_pos))\n string_of_ref = \"\\t\".join(list_of_ref)\n print(string_of_ref, file=table)\n table.close()\n\n samples_in_fasta = []\n #Print out fasta alignment file from table\n alignment_file = outdir + directory + \"_\" + time_mark + \".fasta\"\n write_out = open(alignment_file, 'wt')\n with open(table_location, 'rt') as f:\n count = 0\n for line in f:\n if count > 0:\n line = re.sub('^', '>', line)\n line = line.replace('reference_call', 'root')\n line = line.replace('\\t', '\\n', 1)\n samples_in_fasta.append(line.split('\\n')[0].replace('>', ''))\n line = line.replace('\\t', '')\n print(line, end=\"\", file=write_out)\n count = count + 1\n write_out.close()\n\n try: #if there are no SNP is the table\n mytable = pd.read_csv(table_location, sep='\\t')\n except:\n samples_in_fasta = []\n return(samples_in_fasta)\n\n # move reference to top row\n myref = mytable[-1:]\n myother = mytable[:-1]\n frames = [myref, myother]\n mytable = pd.concat(frames)\n mytable.to_csv(table_location, sep=\"\\t\", index=False)\n\n print(\"\\n%s table dimensions: %s\" % (directory, str(mytable.shape)))\n\n print(\"%s RAxML running...\" % directory)\n try:\n if arg_options['only_all_vcf']:\n os.system(\"{} -s {} -n raxml -m GTRCATI -o root -p 12345 -T {} > /dev/null 2>&1\" .format(arg_options['sys_raxml'], alignment_file, arg_options['cpu_count']))\n else:\n os.system(\"{} -s {} -n raxml -m GTRCATI -o root -p 12345 -T {} > /dev/null 2>&1\" .format(arg_options['sys_raxml'], alignment_file, arg_options['raxml_cpu']))\n except:\n logging.warning('RAxML failed')\n write_out = open('RAXML_FAILED', 'w+')\n write_out.close()\n pass\n try:\n ordered_list_from_tree = outdir + directory + \"-cleanedAlignment.txt\"\n write_out = open(ordered_list_from_tree, 'w+')\n print(\"reference_pos\", file=write_out)\n print(\"reference_call\", file=write_out)\n if os.path.isfile(\"RAxML_bestTree.raxml\"):\n with 
open(\"RAxML_bestTree.raxml\", 'rt') as f:\n for line in f:\n line = re.sub('[:,]', '\\n', line)\n line = re.sub('[)(]', '', line)\n line = re.sub('[0-9].*\\.[0-9].*\\n', '', line)\n line = re.sub('root\\n', '', line)\n write_out.write(line)\n best_raxml_tre = directory + \"_\" + time_mark + \"-RAxML-bestTree.tre\"\n os.rename(\"RAxML_bestTree.raxml\", best_raxml_tre)\n write_out.close()\n best_raxml_svg = directory + \"_\" + time_mark + \"-RAxML-bestTree.svg\"\n try:\n os.system(\"cat {} | nw_display -s -S -w 1300 -t -v 30 -i 'opacity:0' -b 'opacity:0' -l 'font-size:14;font-family:serif;font-style:italic' -d 'stroke-width:1;stroke:blue' - > {}\" .format(best_raxml_tre, best_raxml_svg)) #-s produces svg, -S suppress scale bar, -w to set the number of columns available for display, -t tab format, -v vertical spacing, -i inner node label, -b branch style\n except:\n logging.debug(f'{directory} nw_display failed, likely RAxML related, not enough samples in group to build tree')\n pass\n out_org = str(os.getcwd()) + \"/\" + directory + \"_\" + time_mark + \"-organized-table.txt\"\n out_sort = str(os.getcwd()) + \"/\" + directory + \"_\" + time_mark + \"-sorted-table.txt\"\n\n sort_table(table_location, ordered_list_from_tree, out_org) #function\n\n print(\"%s Getting map quality...\" % directory)\n average = lambda x: x.mean()\n all_map_qualities = pd.DataFrame(all_map_qualities)\n #ave_mq = Type: Series\n ave_mq = all_map_qualities.apply(average, axis=1)\n ave_mq = ave_mq.astype(int)\n ave_mq.to_csv('outfile.txt', sep='\\t', header='True') # write to csv\n\n write_out = open('map_quality.txt', 'w+')\n print('reference_pos\\tmap-quality', file=write_out)\n with open('outfile.txt', 'rt') as f:\n for line in f:\n write_out.write(line)\n write_out.close()\n #seemed pooling did not like a function with no parameters given\n quality = pd.read_csv('map_quality.txt', sep='\\t')\n\n mytable = pd.read_csv(table_location, sep='\\t')\n mytable = mytable.set_index('reference_pos')\n\n # order list is from tree file\n # gives order for samples to be listed in table to be phylogenetically correct\n ordered_list = []\n with open(ordered_list_from_tree) as infile:\n for i in infile:\n i = i.rstrip()\n ordered_list.append(i)\n # sinces this is set as the mytable index do not include in ordering\n ordered_list.remove('reference_pos')\n\n # reorder table based on order of list\n mytable = mytable.reindex(ordered_list)\n mytable.to_csv(table_location, sep='\\t')\n\n mytable_sort = pd.read_csv(table_location, sep='\\t') #sorted\n mytable_sort = mytable_sort.set_index('reference_pos') #sorted\n mytable_sort = mytable_sort.transpose() #sort\n mytable_sort.to_csv(out_sort, sep='\\t', index_label='reference_pos') #sort\n\n mytable = pd.read_csv(out_org, sep='\\t') #org\n mytable = mytable.set_index('reference_pos') #org\n mytable = mytable.transpose() #org\n mytable.to_csv(out_org, sep='\\t', index_label='reference_pos') #org\n\n if arg_options['gbk_file'] and not arg_options['no_annotation']:\n\n print(\"{} annotating from annotation dictionary... 
{}\" .format(directory, time_mark))\n mytable_sort = pd.read_csv(out_sort, sep='\\t') #sort\n mytable_sort = mytable_sort.merge(quality, on='reference_pos', how='inner') #sort\n mytable_sort.to_json('mytable_sort.json')\n\n annotation_dict = arg_options['annotation_dict']\n for gbk_chrome, pro in annotation_dict.items():\n ref_pos = mytable_sort[['reference_pos']]\n ref_pos = ref_pos.rename(columns={'index': 'reference_pos'})\n ref_pos = pd.DataFrame(ref_pos.reference_pos.str.split('-', expand=True).values, columns=['reference', 'position'])\n ref_pos = ref_pos[ref_pos['reference'] == gbk_chrome]\n\n write_out = open('annotations.csv', 'a')\n positions = ref_pos.position.to_frame()\n for index, row in positions.iterrows():\n pos = row.position\n try:\n aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]\n try:\n chrom, name, locus, tag = aaa.values[0]\n print(\"{}-{}\\t{}, {}, {}\".format(chrom, pos, locus, tag, name), file=write_out)\n except ValueError:\n # if only one annotation entire chromosome (such with flu) then having [0] fails\n chrom, name, locus, tag = aaa.values\n print(\"{}-{}\\t{}, {}, {}\".format(chrom, pos, locus, tag, name), file=write_out)\n except KeyError:\n print(\"{}-{}\\tNo annotated product\" .format(gbk_chrome, pos), file=write_out)\n write_out.close()\n\n annotations_df = pd.read_csv('annotations.csv', sep='\\t', header=None, names=['index', 'annotations'], index_col='index')\n\n annotations_df.index.names = ['reference_pos']\n mytable_sort = mytable_sort.set_index('reference_pos')\n annotations_df.index.names = ['reference_pos']\n mytable_sort = mytable_sort.merge(annotations_df, left_index=True, right_index=True)\n mytable_sort = mytable_sort.transpose() #sort\n mytable_sort.to_csv(out_sort, sep='\\t', index_label='reference_pos') #sort\n\n mytable_org = pd.read_csv(out_org, sep='\\t') #org\n mytable_org = mytable_org.merge(quality, on='reference_pos', how='inner') #org\n mytable_org = mytable_org.set_index('reference_pos')\n mytable_org = mytable_org.merge(annotations_df, left_index=True, right_index=True)\n mytable_org = mytable_org.transpose() #org\n mytable_org.to_csv(out_org, sep='\\t', index_label='reference_pos') #org\n\n else:\n print(\"No gbk file or no table to annotate\")\n mytable_sort = pd.read_csv(out_sort, sep='\\t') #sort\n mytable_sort = mytable_sort.merge(quality, on='reference_pos', how='inner') #sort\n mytable_sort = mytable_sort.set_index('reference_pos') #sort\n mytable_sort = mytable_sort.transpose() #sort\n mytable_sort.to_csv(out_sort, sep='\\t', index_label='reference_pos') #sort\n # add when no annotation\n with open(out_sort, 'rt') as f:\n line = f.readline()\n f.close()\n column_count = line.count('\\t') #sort\n column_count = column_count - 1 #sort\n #print(\"column_count: %s\" % column_count)\n with open(out_sort, 'at') as f:\n print(\"no_annotation\", end='', file=f)\n print('\\t' * column_count, file=f)\n f.close()\n\n print(\"No gbk file or no table to annotate\")\n mytable = pd.read_csv(out_org, sep='\\t') #org\n mytable = mytable.merge(quality, on='reference_pos', how='inner') #org\n mytable = mytable.set_index('reference_pos') #org\n mytable = mytable.transpose() #org\n mytable.to_csv(out_org, sep='\\t', index_label='reference_pos') #org\n # add when no annotation\n with open(out_org, 'rt') as f:\n line = f.readline()\n f.close()\n column_count = line.count('\\t')\n column_count = column_count - 1\n #print(\"column_count: %s\" % column_count)\n with open(out_org, 'at') as f:\n 
print(\"no_annotation\", end='', file=f)\n print('\\t' * column_count, file=f)\n f.close()\n\n excelwriter(out_sort) #***FUNCTION CALL #sort\n excelwriter(out_org) #***FUNCTION CALL #org\n\n for r in glob.glob('*vcf'):\n os.remove(r)\n\n except ValueError as ex:\n # warning_log(ex, inspect.getframeinfo(inspect.currentframe()), file_name, 'Possible table creation failure')\n return\n\n try:\n os.remove(ordered_list_from_tree)\n if arg_options['gbk_file']:\n os.remove(\"annotations.csv\")\n os.remove(\"outfile.txt\")\n os.remove(\"map_quality.txt\")\n os.remove(out_sort)\n os.remove(out_org) # organized.txt table\n os.remove(table_location) # unorganized table\n os.remove('RAxML_info.raxml')\n os.remove('RAxML_log.raxml')\n os.remove('RAxML_parsimonyTree.raxml')\n os.remove('RAxML_result.raxml')\n os.remove(directory + \"_\" + time_mark + '.fasta.reduced')\n\n except FileNotFoundError:\n pass\n\n # PANDA NOTES\n # get the index: mytable.index\n # get columns: mytable.columns\n # get a column: mytable.AF2122_NC002945_105651, shows index (sample names)\n # get a row: mytable.ix['reference'], shows columns (positions and SNPs)\n # values: mytable.values, SNPs - series\n # strip off the bottom row: mytable[:-1]\n # get the bottom row: mytable[-1:]\n\n with open(directory + \"_\" + time_mark + \"-samples_in_fasta.json\", 'w') as outfile:\n json.dump(samples_in_fasta, outfile)\n\n return(samples_in_fasta)\n\n\ndef sort_table(table_location, ordered, out_org):\n mytable = pd.read_csv(table_location, sep='\\t')\n #mytable=mytable.set_index('reference_pos')\n\n # order list is from tree file\n # gives order for samples to be listed in table to be phylogenetically correct\n ordered_list = []\n with open(ordered) as infile:\n for i in infile:\n i = i.rstrip()\n ordered_list.append(i)\n\n # Convert reference_pos-column to category and in set the ordered_list as categories hierarchy\n mytable.reference_pos = mytable.reference_pos.astype(\"category\")\n mytable.reference_pos.cat.set_categories(ordered_list, inplace=True)\n mytable = mytable.sort_values([\"reference_pos\"]) # 'sort' changed to 'sort_values'\n\n # count number of SNPs in each column\n snp_per_column = []\n for column_header in mytable:\n count = 0\n column = mytable[column_header]\n # for each element in the column\n for element in column:\n if element != column[0]:\n count = count + 1\n snp_per_column.append(count)\n #print(\"the count is: %s\" % count)\n row1 = pd.Series(snp_per_column, mytable.columns, name=\"snp_per_column\")\n #row1 = row1.drop('reference_pos')\n\n # get the snp count per column\n # for each column in the table\n snp_from_top = []\n for column_header in mytable:\n count = 0\n column = mytable[column_header]\n # for each element in the column\n # skip the first element\n for element in column[1:]:\n if element == column[0]:\n count = count + 1\n else:\n break\n snp_from_top.append(count)\n row2 = pd.Series(snp_from_top, mytable.columns, name=\"snp_from_top\")\n #row2 = row2.drop('reference_pos')\n\n mytable = mytable.append([row1])\n mytable = mytable.append([row2])\n #In pandas=0.18.1 even this does not work:\n # abc = row1.to_frame()\n # abc = abc.T --> mytable.shape (5, 18), abc.shape (1, 18)\n # mytable.append(abc)\n #Continue to get error: \"*** ValueError: all the input arrays must have same number of dimensions\"\n\n mytable = mytable.T\n mytable = mytable.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])\n mytable = mytable.T\n\n # remove snp_per_column and snp_from_top rows\n mytable = 
mytable[:-2]\n mytable.to_csv(out_org, sep='\\t', index=False)\n\n\ndef excelwriter(filename):\n orginal_name = filename\n filename = filename.replace(\".txt\", \".xlsx\")\n wb = xlsxwriter.Workbook(filename)\n ws = wb.add_worksheet(\"Sheet1\")\n with open(orginal_name, 'r') as csvfile:\n table = csv.reader(csvfile, delimiter='\\t')\n i = 0\n for row in table:\n ws.write_row(i, 0, row)\n i += 1\n\n col = len(row)\n col = col + 1\n #print(i, \"x\", col)\n\n formatA = wb.add_format({'bg_color': '#58FA82'})\n formatG = wb.add_format({'bg_color': '#F7FE2E'})\n formatC = wb.add_format({'bg_color': '#0000FF'})\n formatT = wb.add_format({'bg_color': '#FF0000'})\n formatnormal = wb.add_format({'bg_color': '#FDFEFE'})\n formatlowqual = wb.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})\n formathighqual = wb.add_format({'font_color': '#000000', 'bg_color': '#FDFEFE'})\n formatambigous = wb.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})\n formatN = wb.add_format({'bg_color': '#E2CFDD'})\n\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 60, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 59, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 58, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 57, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 56, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 55, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 54, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 53, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 52, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 51, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 50, 'format': formathighqual})\n ws.conditional_format(i - 2, 1, i - 2, col - 2, {'type': 'text', 'criteria': 'not containing', 'value': 100, 'format': formatlowqual})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': formatnormal})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': formatA})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': formatG})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': formatC})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': formatT})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 
'text', 'criteria': 'containing', 'value': 'R', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': formatambigous})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': formatN})\n ws.conditional_format(2, 1, i - 3, col - 2, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': formatN})\n\n ws.set_column(0, 0, 30)\n ws.set_column(1, col - 2, 2)\n ws.freeze_panes(2, 1)\n format_rotation = wb.add_format({'rotation': '90'})\n ws.set_row(0, 140, format_rotation)\n formatannotation = wb.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})\n #set last row\n ws.set_row(i - 1, 400, formatannotation)\n\n wb.close()\n"
]
| [
[
"numpy.max",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.read_excel",
"numpy.mean",
"pandas.concat",
"pandas.IntervalIndex.from_arrays",
"pandas.read_csv",
"pandas.Series"
]
]
|
keiranrowan/bugle | [
"05c979296a852ee06512aae2897c30216a3cbb31"
]
| [
"bugle.py"
]
| [
"import pdfkit\nimport jinja2\nimport json\nimport argparse\nimport matplotlib.pyplot as plt\n\n\ndef main():\n # Initialize STDIN Args\n parser = argparse.ArgumentParser(description='Generate Custom Reports from Templates')\n parser.add_argument('template', help='Report template to render')\n parser.add_argument('data', help='Report data to parse')\n parser.add_argument('output', help='Name of generated report')\n parser.add_argument('-v', '--verbose', action='store_true', help='Output additional data at runtime')\n args = parser.parse_args()\n\n if args.verbose:\n print(args.template)\n print(args.data)\n\n # Generate Assets\n with open('data/' + args.data + '.json') as f:\n data = json.load(f)[0]\n assets = []\n i = 0\n if 'graphs' in data:\n for graph in data['graphs']:\n print(list(graph)[0])\n createGraph(data['graphs'][graph], graph)\n assets.append(graph)\n i += 1\n\n # Compile Template\n load = jinja2.FileSystemLoader(searchpath=\"./templates/\")\n templateEnv = jinja2.Environment(loader=load)\n TEMPLATE = args.template + '.rpt'\n template = templateEnv.get_template(TEMPLATE)\n with open('data/' + args.data + '.json') as f:\n data = json.load(f)[0]\n tempargs = {}\n tempargs['assets'] = './assets'\n\n # Load Asset Variables\n for item in assets:\n tempargs[item] = item\n if args.verbose:\n print(item)\n\n # Load Variables\n for index in data:\n tempargs[index] = data[index]\n if args.verbose:\n print(data[index])\n output = template.render(tempargs)\n outputHTML(args, output)\n f.close()\n\n outputPDF(args)\n\n\ndef outputHTML(args, output):\n html_file = open('reports/html/' + args.output + '.html', 'w')\n html_file.write(output)\n html_file.close()\n\n\ndef outputPDF(args):\n pdfkit.from_file('reports/html/' + args.output + '.html', 'reports/pdf/' + args.output + '.pdf')\n\n\ndef createGraph(data, name):\n if data['type'] == 'bar':\n title = data['title']\n xindex = data['x']\n yindex = data['y']\n xlabel = data['x-label']\n ylabel = data['y-label']\n\n chart = plt.figure()\n axis = chart.add_axes([0, 0, 1, 1])\n x = data[xindex]\n y = data[yindex]\n axis.bar(x, y)\n axis.set_title(title)\n axis.set_xlabel(xlabel)\n axis.set_ylabel(ylabel)\n chart.savefig('./reports/html/assets/' + name + '.png', dpi=600, bbox_inches='tight')\n elif data['type'] == 'line':\n title = data['title']\n lines = []\n labels = []\n for line in data['lines']:\n labels.append(line)\n lines.append(data[data['lines'][line]])\n\n print(lines)\n print(labels)\n xindex = data['x']\n xlabel = data['x-label']\n ylabel = data['y-label']\n\n chart = plt.figure()\n axis = chart.add_axes([0, 0, 1, 1])\n x = data[xindex]\n i = 0\n\n for line in lines:\n axis.plot(x, lines[i], label=labels[i])\n i += 1\n axis.set_title(title)\n axis.set_xlabel(xlabel)\n axis.set_ylabel(ylabel)\n chart.savefig('./reports/html/assets/' + name + '.png', dpi=600, bbox_inches='tight')\n # elif data['type'] == 'histogram':\n\n \nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"matplotlib.pyplot.figure"
]
]
|
Sologa/fairseq_extended | [
"ab9b67319c8fd1388a8b9d1df2c7d3f87976af50"
]
| [
"fairseq/checkpoint_utils.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport ast\nimport collections\nimport contextlib\nimport logging\nimport os\nimport re\nimport traceback\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Optional, Union\n\nimport torch\nfrom fairseq.dataclass.configs import CheckpointConfig, FairseqConfig\nfrom fairseq.dataclass.utils import (\n convert_namespace_to_omegaconf,\n overwrite_args_by_name,\n)\nfrom fairseq.file_io import PathManager\nfrom fairseq.models import FairseqDecoder, FairseqEncoder\nfrom omegaconf import Container, DictConfig, open_dict, OmegaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):\n from fairseq import meters\n\n # only one worker should attempt to create the required dir\n if trainer.data_parallel_rank == 0:\n os.makedirs(cfg.save_dir, exist_ok=True)\n\n prev_best = getattr(save_checkpoint, \"best\", val_loss)\n if val_loss is not None:\n best_function = max if cfg.maximize_best_checkpoint_metric else min\n save_checkpoint.best = best_function(val_loss, prev_best)\n\n if cfg.no_save:\n return\n\n trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state\n\n if not trainer.should_save_checkpoint_on_current_rank:\n return\n\n write_timer = meters.StopwatchMeter()\n write_timer.start()\n\n epoch = epoch_itr.epoch\n end_of_epoch = epoch_itr.end_of_epoch()\n updates = trainer.get_num_updates()\n\n logger.info(f\"Preparing to save checkpoint for epoch {epoch} @ {updates} updates\")\n\n def is_better(a, b):\n return a >= b if cfg.maximize_best_checkpoint_metric else a <= b\n\n suffix = trainer.checkpoint_suffix\n checkpoint_conds = collections.OrderedDict()\n checkpoint_conds[\"checkpoint{}{}.pt\".format(epoch, suffix)] = (False)\n checkpoint_conds[\"checkpoint_{}_{}{}.pt\".format(epoch, updates, suffix)] = (False)\n# checkpoint_conds[\"checkpoint{}{}.pt\".format(epoch, suffix)] = (\n# end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0\n# )\n# checkpoint_conds[\"checkpoint_{}_{}{}.pt\".format(epoch, updates, suffix)] = (\n# not end_of_epoch\n# and cfg.save_interval_updates > 0\n# and updates % cfg.save_interval_updates == 0\n# )\n checkpoint_conds[\"checkpoint_best{}.pt\".format(suffix)] = val_loss is not None and (\n not hasattr(save_checkpoint, \"best\")\n or is_better(val_loss, save_checkpoint.best)\n )\n if val_loss is not None and cfg.keep_best_checkpoints > 0:\n checkpoint_conds[\n \"checkpoint.best_{}_{:.2f}.pt\".format(cfg.best_checkpoint_metric, val_loss)\n ] = not hasattr(save_checkpoint, \"best\") or is_better(\n val_loss, save_checkpoint.best\n )\n checkpoint_conds[\n \"checkpoint_last{}.pt\".format(suffix)\n ] = not cfg.no_last_checkpoints\n\n extra_state = {\"train_iterator\": epoch_itr.state_dict(), \"val_loss\": val_loss}\n if hasattr(save_checkpoint, \"best\"):\n extra_state.update({\"best\": save_checkpoint.best})\n\n checkpoints = [\n os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond\n ]\n if len(checkpoints) > 0:\n trainer.save_checkpoint(checkpoints[0], extra_state)\n for cp in checkpoints[1:]:\n if cfg.write_checkpoints_asynchronously:\n # TODO[ioPath]: Need to implement a delayed asynchronous\n # file copying/moving feature.\n logger.warning(\n f\"ioPath is not copying {checkpoints[0]} to {cp} \"\n \"since async write mode is 
on.\"\n )\n else:\n assert PathManager.copy(\n checkpoints[0], cp, overwrite=True\n ), f\"Failed to copy {checkpoints[0]} to {cp}\"\n\n write_timer.stop()\n logger.info(\n \"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)\".format(\n checkpoints[0], epoch, updates, val_loss, write_timer.sum\n )\n )\n\n if not end_of_epoch and cfg.keep_interval_updates > 0:\n # remove old checkpoints; checkpoints are sorted in descending order\n checkpoints = checkpoint_paths(\n cfg.save_dir, pattern=r\"checkpoint_\\d+_(\\d+){}\\.pt\".format(suffix)\n )\n for old_chk in checkpoints[cfg.keep_interval_updates :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n if cfg.keep_last_epochs > 0:\n # remove old epoch checkpoints; checkpoints are sorted in descending order\n checkpoints = checkpoint_paths(cfg.save_dir, pattern=r\"checkpoint(\\d+){}\\.pt\".format(suffix))\n for old_chk in checkpoints[cfg.keep_last_epochs :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n if cfg.keep_best_checkpoints > 0:\n # only keep the best N checkpoints according to validation metric\n checkpoints = checkpoint_paths(\n cfg.save_dir,\n pattern=r\"checkpoint\\.best_{}_(\\d+\\.?\\d*){}\\.pt\".format(\n cfg.best_checkpoint_metric, suffix\n ),\n )\n if not cfg.maximize_best_checkpoint_metric:\n checkpoints = checkpoints[::-1]\n for old_chk in checkpoints[cfg.keep_best_checkpoints :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n\ndef load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):\n \"\"\"\n Load a checkpoint and restore the training iterator.\n\n *passthrough_args* will be passed through to\n ``trainer.get_train_iterator``.\n \"\"\"\n\n reset_optimizer = cfg.reset_optimizer\n reset_lr_scheduler = cfg.reset_lr_scheduler\n optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)\n reset_meters = cfg.reset_meters\n reset_dataloader = cfg.reset_dataloader\n\n if cfg.finetune_from_model is not None and (\n reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader\n ):\n raise ValueError(\n \"--finetune-from-model can not be set together with either --reset-optimizer\"\n \" or reset_lr_scheduler or reset_meters or reset_dataloader\"\n )\n\n suffix = trainer.checkpoint_suffix\n if (\n cfg.restore_file == \"checkpoint_last.pt\"\n ): # default value of restore_file is 'checkpoint_last.pt'\n checkpoint_path = os.path.join(\n cfg.save_dir, \"checkpoint_last{}.pt\".format(suffix)\n )\n first_launch = not PathManager.exists(checkpoint_path)\n if cfg.finetune_from_model is not None and first_launch:\n # if there is no last checkpoint to restore, start the finetune from pretrained model\n # else just use usual logic to load checkpoint, e.g. 
restart from last checkpoint and etc.\n if PathManager.exists(cfg.finetune_from_model):\n checkpoint_path = cfg.finetune_from_model\n reset_optimizer = True\n reset_lr_scheduler = True\n reset_meters = True\n reset_dataloader = True\n logger.info(\n f\"loading pretrained model from {checkpoint_path}: \"\n \"optimizer, lr scheduler, meters, dataloader will be reset\"\n )\n else:\n raise ValueError(\n f\"--funetune-from-model {cfg.finetune_from_model} does not exist\"\n )\n elif suffix is not None:\n checkpoint_path = cfg.restore_file.replace(\".pt\", suffix + \".pt\")\n else:\n checkpoint_path = cfg.restore_file\n\n if cfg.restore_file != \"checkpoint_last.pt\" and cfg.finetune_from_model:\n raise ValueError(\n \"--finetune-from-model and --restore-file (non-default value) \"\n \"can not be specified together: \" + str(cfg)\n )\n\n extra_state = trainer.load_checkpoint(\n checkpoint_path,\n reset_optimizer,\n reset_lr_scheduler,\n optimizer_overrides,\n reset_meters=reset_meters,\n )\n\n if (\n extra_state is not None\n and \"best\" in extra_state\n and not reset_optimizer\n and not reset_meters\n ):\n save_checkpoint.best = extra_state[\"best\"]\n\n if extra_state is not None and not reset_dataloader:\n # restore iterator from checkpoint\n itr_state = extra_state[\"train_iterator\"]\n epoch_itr = trainer.get_train_iterator(\n epoch=itr_state[\"epoch\"], load_dataset=True, **passthrough_args\n )\n epoch_itr.load_state_dict(itr_state)\n else:\n epoch_itr = trainer.get_train_iterator(\n epoch=1, load_dataset=True, **passthrough_args\n )\n\n trainer.lr_step(epoch_itr.epoch)\n\n return extra_state, epoch_itr\n\n\ndef load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):\n \"\"\"Loads a checkpoint to CPU (with upgrading for backward compatibility).\n\n If doing single-GPU training or if the checkpoint is only being loaded by at\n most one process on each node (current default behavior is for only rank 0\n to read the checkpoint from disk), load_on_all_ranks should be False to\n avoid errors from torch.distributed not having been initialized or\n torch.distributed.barrier() hanging.\n\n If all processes on each node may be loading the checkpoint\n simultaneously, load_on_all_ranks should be set to True to avoid I/O\n conflicts.\n\n There's currently no support for > 1 but < all processes loading the\n checkpoint on each node.\n \"\"\"\n local_path = PathManager.get_local_path(path)\n # The locally cached file returned by get_local_path() may be stale for\n # remote files that are periodically updated/overwritten (ex:\n # checkpoint_last.pt) - so we remove the local copy, sync across processes\n # (if needed), and then download a fresh copy.\n if local_path != path and PathManager.path_requires_pathmanager(path):\n try:\n os.remove(local_path)\n except FileNotFoundError:\n # With potentially multiple processes removing the same file, the\n # file being missing is benign (missing_ok isn't available until\n # Python 3.8).\n pass\n if load_on_all_ranks:\n torch.distributed.barrier()\n local_path = PathManager.get_local_path(path)\n\n with open(local_path, \"rb\") as f:\n state = torch.load(f, map_location=torch.device(\"cpu\"))\n\n if \"args\" in state and state[\"args\"] is not None and arg_overrides is not None:\n args = state[\"args\"]\n for arg_name, arg_val in arg_overrides.items():\n setattr(args, arg_name, arg_val)\n\n if \"cfg\" in state and state[\"cfg\"] is not None:\n\n # hack to be able to set Namespace in dict config. 
this should be removed when we update to newer\n # omegaconf version that supports object flags, or when we migrate all existing models\n from omegaconf import _utils\n\n old_primitive = _utils.is_primitive_type\n _utils.is_primitive_type = lambda _: True\n\n state[\"cfg\"] = OmegaConf.create(state[\"cfg\"])\n\n _utils.is_primitive_type = old_primitive\n OmegaConf.set_struct(state[\"cfg\"], True)\n\n if arg_overrides is not None:\n overwrite_args_by_name(state[\"cfg\"], arg_overrides)\n\n state = _upgrade_state_dict(state)\n return state\n\n\ndef load_model_ensemble(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n \"\"\"Loads an ensemble of models.\n\n Args:\n filenames (List[str]): checkpoint files to load\n arg_overrides (Dict[str,Any], optional): override model args that\n were used during model training\n task (fairseq.tasks.FairseqTask, optional): task to use for loading\n \"\"\"\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble, args, _task = load_model_ensemble_and_task(\n filenames,\n arg_overrides,\n task,\n strict,\n suffix,\n num_shards,\n state,\n )\n return ensemble, args\n\n\ndef load_model_ensemble_and_task(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n assert state is None or len(filenames) == 1\n\n from fairseq import tasks\n\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble = []\n cfg = None\n for filename in filenames:\n orig_filename = filename\n assert num_shards > 0\n for shard_idx in range(num_shards):\n if num_shards == 1:\n filename = filename.replace(\".pt\", suffix + \".pt\")\n else:\n filename = orig_filename[:-3] + f\"_part{shard_idx}.pt\"\n\n if not PathManager.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n if state is None:\n state = load_checkpoint_to_cpu(filename, arg_overrides)\n if \"args\" in state and state[\"args\"] is not None:\n cfg = convert_namespace_to_omegaconf(state[\"args\"])\n elif \"cfg\" in state and state[\"cfg\"] is not None:\n cfg = state[\"cfg\"]\n else:\n raise RuntimeError(\n f\"Neither args nor cfg exist in state keys = {state.keys()}\"\n )\n\n if task is None:\n task = tasks.setup_task(cfg.task)\n\n if \"task_state\" in state:\n task.load_state_dict(state[\"task_state\"])\n\n # build model for ensemble\n model = task.build_model(cfg.model)\n\n model.load_state_dict(state[\"model\"], strict=strict, model_cfg=cfg.model)\n\n # reset state so it gets loaded for the next model in ensemble\n state = None\n\n ensemble.append(model)\n return ensemble, cfg, task\n\n\ndef checkpoint_paths(path, pattern=r\"checkpoint(\\d+)\\.pt\"):\n \"\"\"Retrieves all checkpoints found in `path` directory.\n\n Checkpoints are identified by matching filename to the specified pattern. 
If\n the pattern contains groups, the result will be sorted by the first group in\n descending order.\n \"\"\"\n pt_regexp = re.compile(pattern)\n files = os.listdir(path)\n\n entries = []\n for i, f in enumerate(files):\n m = pt_regexp.fullmatch(f)\n if m is not None:\n idx = float(m.group(1)) if len(m.groups()) > 0 else i\n entries.append((idx, m.group(0)))\n return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]\n\n\ndef torch_persistent_save(obj, filename, async_write: bool = False):\n if async_write:\n with PathManager.opena(filename, \"wb\") as f:\n _torch_persistent_save(obj, f)\n else:\n if PathManager.supports_rename(filename):\n # do atomic save\n with PathManager.open(filename + \".tmp\", \"wb\") as f:\n _torch_persistent_save(obj, f)\n PathManager.rename(filename + \".tmp\", filename)\n else:\n # fallback to non-atomic save\n with PathManager.open(filename, \"wb\") as f:\n _torch_persistent_save(obj, f)\n\n\ndef _torch_persistent_save(obj, f):\n if isinstance(f, str):\n with PathManager.open(f, \"wb\") as h:\n torch_persistent_save(obj, h)\n return\n for i in range(3):\n try:\n return torch.save(obj, f)\n except Exception:\n if i == 2:\n logger.error(traceback.format_exc())\n\n\ndef _upgrade_state_dict(state):\n \"\"\"Helper for upgrading old model checkpoints.\"\"\"\n from fairseq import models, registry, tasks\n\n # add optimizer_history\n if \"optimizer_history\" not in state:\n state[\"optimizer_history\"] = [\n {\"criterion_name\": \"CrossEntropyCriterion\", \"best_loss\": state[\"best_loss\"]}\n ]\n state[\"last_optimizer_state\"] = state[\"optimizer\"]\n del state[\"optimizer\"]\n del state[\"best_loss\"]\n # move extra_state into sub-dictionary\n if \"epoch\" in state and \"extra_state\" not in state:\n state[\"extra_state\"] = {\n \"epoch\": state[\"epoch\"],\n \"batch_offset\": state[\"batch_offset\"],\n \"val_loss\": state[\"val_loss\"],\n }\n del state[\"epoch\"]\n del state[\"batch_offset\"]\n del state[\"val_loss\"]\n # reduce optimizer history's memory usage (only keep the last state)\n if \"optimizer\" in state[\"optimizer_history\"][-1]:\n state[\"last_optimizer_state\"] = state[\"optimizer_history\"][-1][\"optimizer\"]\n for optim_hist in state[\"optimizer_history\"]:\n del optim_hist[\"optimizer\"]\n # record the optimizer class name\n if \"optimizer_name\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"optimizer_name\"] = \"FairseqNAG\"\n # move best_loss into lr_scheduler_state\n if \"lr_scheduler_state\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"lr_scheduler_state\"] = {\n \"best\": state[\"optimizer_history\"][-1][\"best_loss\"]\n }\n del state[\"optimizer_history\"][-1][\"best_loss\"]\n # keep track of number of updates\n if \"num_updates\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"num_updates\"] = 0\n # old model checkpoints may not have separate source/target positions\n if \"args\" in state and hasattr(state[\"args\"], \"max_positions\") and not hasattr(\n state[\"args\"], \"max_source_positions\"\n ):\n state[\"args\"].max_source_positions = state[\"args\"].max_positions\n state[\"args\"].max_target_positions = state[\"args\"].max_positions\n # use stateful training data iterator\n if \"train_iterator\" not in state[\"extra_state\"]:\n state[\"extra_state\"][\"train_iterator\"] = {\n \"epoch\": state[\"extra_state\"][\"epoch\"],\n \"iterations_in_epoch\": state[\"extra_state\"].get(\"batch_offset\", 0),\n }\n\n # backward 
compatibility, cfg updates\n if \"args\" in state and state[\"args\"] is not None:\n # default to translation task\n if not hasattr(state[\"args\"], \"task\"):\n state[\"args\"].task = \"translation\"\n # --raw-text and --lazy-load are deprecated\n if getattr(state[\"args\"], \"raw_text\", False):\n state[\"args\"].dataset_impl = \"raw\"\n elif getattr(state[\"args\"], \"lazy_load\", False):\n state[\"args\"].dataset_impl = \"lazy\"\n # epochs start at 1\n if state[\"extra_state\"][\"train_iterator\"] is not None:\n state[\"extra_state\"][\"train_iterator\"][\"epoch\"] = max(\n state[\"extra_state\"][\"train_iterator\"].get(\"epoch\", 1), 1\n )\n # --remove-bpe ==> --postprocess\n if hasattr(state[\"args\"], \"remove_bpe\"):\n state[\"args\"].post_process = state[\"args\"].remove_bpe\n # --min-lr ==> --stop-min-lr\n if hasattr(state[\"args\"], \"min_lr\"):\n state[\"args\"].stop_min_lr = state[\"args\"].min_lr\n del state[\"args\"].min_lr\n # binary_cross_entropy => wav2vec criterion\n if (\n hasattr(state[\"args\"], \"criterion\")\n and state[\"args\"].criterion == \"binary_cross_entropy\"\n ):\n state[\"args\"].criterion = \"wav2vec\"\n # speech_pretraining => audio pretraining\n if (\n hasattr(state[\"args\"], \"task\")\n and state[\"args\"].task == \"speech_pretraining\"\n ):\n state[\"args\"].task = \"audio_pretraining\"\n # audio_cpc => wav2vec\n if hasattr(state[\"args\"], \"arch\") and state[\"args\"].arch == \"audio_cpc\":\n state[\"args\"].arch = \"wav2vec\"\n # convert legacy float learning rate to List[float]\n if hasattr(state[\"args\"], \"lr\") and isinstance(state[\"args\"].lr, float):\n state[\"args\"].lr = [state[\"args\"].lr]\n # convert task data arg to a string instead of List[string]\n if (\n hasattr(state[\"args\"], \"data\")\n and isinstance(state[\"args\"].data, list)\n and len(state[\"args\"].data) > 0\n ):\n state[\"args\"].data = state[\"args\"].data[0]\n\n state[\"cfg\"] = convert_namespace_to_omegaconf(state[\"args\"])\n\n if \"cfg\" in state and state[\"cfg\"] is not None:\n cfg = state[\"cfg\"]\n with open_dict(cfg):\n # any upgrades for Hydra-based configs\n if (\n \"task\" in cfg\n and \"eval_wer_config\" in cfg.task\n and isinstance(cfg.task.eval_wer_config.print_alignment, bool)\n ):\n cfg.task.eval_wer_config.print_alignment = \"hard\"\n if \"generation\" in cfg and isinstance(cfg.generation.print_alignment, bool):\n cfg.generation.print_alignment = \"hard\"\n if (\n \"model\" in cfg\n and \"w2v_args\" in cfg.model\n and cfg.model.w2v_args is not None\n and (\n hasattr(cfg.model.w2v_args, \"task\") or \"task\" in cfg.model.w2v_args\n )\n and isinstance(\n cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool\n )\n ):\n cfg.model.w2v_args.task.eval_wer_config.print_alignment = \"hard\"\n\n return state\n\n\ndef prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):\n \"\"\"Prune the given state_dict if desired for LayerDrop\n (https://arxiv.org/abs/1909.11556).\n\n Training with LayerDrop allows models to be robust to pruning at inference\n time. 
This function prunes state_dict to allow smaller models to be loaded\n from a larger model and re-maps the existing state_dict for this to occur.\n\n It's called by functions that load models from checkpoints and does not\n need to be called directly.\n \"\"\"\n arch = None\n if model_cfg is not None:\n arch = (\n model_cfg._name\n if isinstance(model_cfg, DictConfig)\n else getattr(model_cfg, \"arch\", None)\n )\n\n if not model_cfg or arch is None or arch == \"ptt_transformer\":\n # args should not be none, but don't crash if it is.\n return state_dict\n\n encoder_layers_to_keep = getattr(model_cfg, \"encoder_layers_to_keep\", None)\n decoder_layers_to_keep = getattr(model_cfg, \"decoder_layers_to_keep\", None)\n\n if not encoder_layers_to_keep and not decoder_layers_to_keep:\n return state_dict\n\n # apply pruning\n logger.info(\n \"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop\"\n )\n\n def create_pruning_pass(layers_to_keep, layer_name):\n keep_layers = sorted(\n int(layer_string) for layer_string in layers_to_keep.split(\",\")\n )\n mapping_dict = {}\n for i in range(len(keep_layers)):\n mapping_dict[str(keep_layers[i])] = str(i)\n\n regex = re.compile(r\"^{layer}.*\\.layers\\.(\\d+)\".format(layer=layer_name))\n return {\"substitution_regex\": regex, \"mapping_dict\": mapping_dict}\n\n pruning_passes = []\n if encoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, \"encoder\"))\n if decoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, \"decoder\"))\n\n new_state_dict = {}\n for layer_name in state_dict.keys():\n match = re.search(r\"\\.layers\\.(\\d+)\\.\", layer_name)\n # if layer has no number in it, it is a supporting layer, such as an\n # embedding\n if not match:\n new_state_dict[layer_name] = state_dict[layer_name]\n continue\n\n # otherwise, layer should be pruned.\n original_layer_number = match.group(1)\n # figure out which mapping dict to replace from\n for pruning_pass in pruning_passes:\n if original_layer_number in pruning_pass[\"mapping_dict\"] and pruning_pass[\n \"substitution_regex\"\n ].search(layer_name):\n new_layer_number = pruning_pass[\"mapping_dict\"][original_layer_number]\n substitution_match = pruning_pass[\"substitution_regex\"].search(\n layer_name\n )\n new_state_key = (\n layer_name[: substitution_match.start(1)]\n + new_layer_number\n + layer_name[substitution_match.end(1) :]\n )\n new_state_dict[new_state_key] = state_dict[layer_name]\n\n # Since layers are now pruned, *_layers_to_keep are no longer needed.\n # This is more of \"It would make it work fix\" rather than a proper fix.\n if isinstance(model_cfg, DictConfig):\n context = open_dict(model_cfg)\n else:\n context = contextlib.ExitStack()\n with context:\n if hasattr(model_cfg, \"encoder_layers_to_keep\"):\n model_cfg.encoder_layers_to_keep = None\n if hasattr(model_cfg, \"decoder_layers_to_keep\"):\n model_cfg.decoder_layers_to_keep = None\n\n return new_state_dict\n\n\ndef load_pretrained_component_from_model(\n component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str\n):\n \"\"\"\n Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the\n provided `component` object. 
If state_dict fails to load, there may be a\n mismatch in the architecture of the corresponding `component` found in the\n `checkpoint` file.\n \"\"\"\n if not PathManager.exists(checkpoint):\n raise IOError(\"Model file not found: {}\".format(checkpoint))\n state = load_checkpoint_to_cpu(checkpoint)\n if isinstance(component, FairseqEncoder):\n component_type = \"encoder\"\n elif isinstance(component, FairseqDecoder):\n component_type = \"decoder\"\n else:\n raise ValueError(\n \"component to load must be either a FairseqEncoder or \"\n \"FairseqDecoder. Loading other component types are not supported.\"\n )\n component_state_dict = OrderedDict()\n for key in state[\"model\"].keys():\n if key.startswith(component_type):\n # encoder.input_layers.0.0.weight --> input_layers.0.0.weight\n component_subkey = key[len(component_type) + 1 :]\n component_state_dict[component_subkey] = state[\"model\"][key]\n component.load_state_dict(component_state_dict, strict=True)\n return component\n\n\ndef verify_checkpoint_directory(save_dir: str) -> None:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n temp_file_path = os.path.join(save_dir, \"dummy\")\n try:\n with open(temp_file_path, \"w\"):\n pass\n except OSError as e:\n logger.warning(\n \"Unable to access checkpoint save directory: {}\".format(save_dir)\n )\n raise e\n else:\n os.remove(temp_file_path)\n"
]
| [
[
"torch.device",
"torch.save",
"torch.distributed.barrier"
]
]
|
peterhabib/BRCAI | [
"c242c662d623010dfb66c9afcb55aeb9cd6c63ed"
]
| [
"Scripts/common_scaler.py"
]
| [
"import pickle\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nimport Scripts\nfrom Scripts import common\n\n\n# Get Scaler\nx_scaler_file_name = common.root_model_folder + 'x_scaler.sav'\ny_scaler_file_name = common.root_model_folder + 'y_scaler.sav'\n\ndef create_scaler(x_values, y_values):\n x_scaler = StandardScaler()\n x_scaler.fit(x_values)\n\n y_scaler = StandardScaler()\n y_scaler.fit(y_values)\n\n # Save scalers state for later use\n pickle.dump(x_scaler, open(x_scaler_file_name, 'wb'))\n pickle.dump(y_scaler, open(y_scaler_file_name, 'wb'))\n\n return x_scaler, y_scaler\n\ndef load_scaler():\n x_scaler = pickle.load(open(x_scaler_file_name, 'rb'))\n y_scaler = pickle.load(open(y_scaler_file_name, 'rb'))\n\n return x_scaler, y_scaler\n\ndef scale_dataset(x_source, x_scaler, y_source = None, y_scaler = None):\n scaled_columns = x_scaler.transform(x_source.values[:,0:4])\n arr_x_train = np.concatenate(( scaled_columns, x_source.values[:,5:]), axis=1)\n\n if y_source is not None:\n arr_y_train = y_scaler.transform(y_source.values)\n else:\n arr_y_train = None\n\n return arr_x_train, arr_y_train\n"
]
| [
[
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler"
]
]
|
markm42/plancklens | [
"3d8830d39d52036e318d89ac1fc42083df2c528c"
]
| [
"plancklens/qcinv/opfilt_kk.py"
]
| [
"\"\"\"lending map Wiener and inverse variance filtering module.\n\nThis is literally the very same spin-0 inverse variance filtering codes than for temperatures,\nwith indices 'tt' replaced with 'pp' and potential to k remapping\n\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport hashlib\nimport numpy as np\nimport healpy as hp\n\nfrom healpy import alm2map, map2alm\n#: Exporting these two methods so that they can be easily customized / optimized.\n\nfrom plancklens.utils import clhash, enumerate_progress\n\nfrom . import util\nfrom . import template_removal\nfrom . import dense\n\ndef _cli(cl):\n ret = np.zeros_like(cl)\n ret[np.where(cl != 0.)] = 1. / cl[np.where(cl != 0.)]\n return ret\n\ndef p2k(lmax):\n return 0.5 * np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float)\ndef pp2kk(lmax):\n return p2k(lmax) ** 2\n\n\ndef calc_prep(m, s_cls, n_inv_filt):\n \"\"\"Missing doc.\"\"\"\n kmap = np.copy(m)\n n_inv_filt.apply_map(kmap)\n alm = map2alm(kmap, lmax=len(n_inv_filt.b_transf) - 1, iter=0)\n hp.almxfl(alm, n_inv_filt.b_transf * (len(m) / (4. * np.pi)), inplace=True)\n return alm\n\ndef apply_fini(alm, s_cls, n_inv_filt):\n \"\"\" This final operation turns the Wiener-filtered klm cg-solution to the inverse-variance filtered klm. \"\"\"\n hp.almxfl(alm, _cli(s_cls['pp'] * pp2kk(len(s_cls['pp']) - 1)), inplace=True)\n\nclass dot_op:\n \"\"\"Scalar product definition for kk cg-inversion\n\n \"\"\"\n def __init__(self):\n pass\n\n def __call__(self, alm1, alm2):\n lmax1 = hp.Alm.getlmax(alm1.size)\n assert lmax1 == hp.Alm.getlmax(alm2.size)\n return np.sum(hp.alm2cl(alm1, alms2=alm2) * (2. * np.arange(0, lmax1 + 1) + 1))\n\n\nclass fwd_op:\n \"\"\"Conjugate-gradient inversion forward operation definition. \"\"\"\n def __init__(self, s_cls, n_inv_filt):\n self.clkk_inv = _cli(s_cls['pp'] * pp2kk(len(s_cls['pp']) - 1))\n self.n_inv_filt = n_inv_filt\n\n def hashdict(self):\n return {'clkk_inv': clhash(self.clkk_inv),\n 'n_inv_filt': self.n_inv_filt.hashdict()}\n\n def __call__(self, klm):\n return self.calc(klm)\n\n def calc(self, klm):\n if np.all(klm == 0): # do nothing if zero\n return klm\n alm = np.copy(klm)\n self.n_inv_filt.apply_alm(alm)\n alm += hp.almxfl(klm, self.clkk_inv)\n return alm\n\n\nclass pre_op_diag:\n def __init__(self, s_cls, n_inv_filt):\n \"\"\"Harmonic space diagonal pre-conditioner operation. \"\"\"\n clkk = pp2kk(len(s_cls['pp']) - 1) * s_cls['pp']\n assert len(clkk) >= len(n_inv_filt.b_transf)\n n_inv_cl = np.sum(n_inv_filt.n_inv) / (4.0 * np.pi)\n lmax = len(n_inv_filt.b_transf) - 1\n assert lmax <= (len(clkk) - 1)\n\n filt = _cli(clkk[:lmax + 1])\n filt += n_inv_cl * n_inv_filt.b_transf[:lmax + 1] ** 2\n self.filt = _cli(filt)\n\n def __call__(self, klm):\n return self.calc(klm)\n\n def calc(self, klm):\n return hp.almxfl(klm, self.filt)\n\ndef pre_op_dense(lmax, fwd_op, cache_fname=None):\n \"\"\"Missing doc. \"\"\"\n return dense.pre_op_dense_kk(lmax, fwd_op, cache_fname=cache_fname)\n\nclass alm_filter_ninv(object):\n \"\"\"Missing doc. 
\"\"\"\n def __init__(self, n_inv, b_transf,\n marge_monopole=False, marge_dipole=False, marge_uptolmin=-1, marge_maps=(), nlev_fkl=None):\n if isinstance(n_inv, list):\n n_inv_prod = util.load_map(n_inv[0])\n if len(n_inv) > 1:\n for n in n_inv[1:]:\n n_inv_prod = n_inv_prod * util.load_map(n)\n n_inv = n_inv_prod\n else:\n n_inv = util.load_map(n_inv)\n print(\"opfilt_kk: inverse noise map std dev / av = %.3e\" % (\n np.std(n_inv[np.where(n_inv != 0.0)]) / np.average(n_inv[np.where(n_inv != 0.0)])))\n templates = []\n templates_hash = []\n for kmap in [util.load_map(m) for m in marge_maps]:\n assert (len(n_inv) == len(kmap))\n templates.append(template_removal.template_map(kmap))\n templates_hash.append(hashlib.sha1(kmap.view(np.uint8)).hexdigest())\n\n if marge_uptolmin >= 0:\n templates.append(template_removal.template_uptolmin(marge_uptolmin))\n else:\n if marge_monopole: templates.append(template_removal.template_monopole())\n if marge_dipole: templates.append(template_removal.template_dipole())\n\n if len(templates) != 0:\n nmodes = int(np.sum([t.nmodes for t in templates]))\n modes_idx_t = np.concatenate(([t.nmodes * [int(im)] for im, t in enumerate(templates)]))\n modes_idx_i = np.concatenate(([range(0, t.nmodes) for t in templates]))\n Pt_Nn1_P = np.zeros((nmodes, nmodes))\n for i, ir in enumerate_progress(range(nmodes), label='filling template (%s) projection matrix'%nmodes):\n kmap = np.copy(n_inv)\n templates[modes_idx_t[ir]].apply_mode(kmap, int(modes_idx_i[ir]))\n\n ic = 0\n for tc in templates[0:modes_idx_t[ir] + 1]:\n Pt_Nn1_P[ir, ic:(ic + tc.nmodes)] = tc.dot(kmap)\n Pt_Nn1_P[ic:(ic + tc.nmodes), ir] = Pt_Nn1_P[ir, ic:(ic + tc.nmodes)]\n ic += tc.nmodes\n eigv, eigw = np.linalg.eigh(Pt_Nn1_P)\n eigv_inv = 1.0 / eigv\n self.Pt_Nn1_P_inv = np.dot(np.dot(eigw, np.diag(eigv_inv)), np.transpose(eigw))\n\n self.n_inv = n_inv\n self.b_transf = b_transf\n self.npix = len(self.n_inv)\n\n self.nside = hp.npix2nside(self.npix)\n self.marge_monopole = marge_monopole\n self.marge_dipole = marge_dipole\n self.marge_uptolmin = marge_uptolmin\n self.templates = templates\n self.templates_hash = templates_hash\n\n if nlev_fkl is None:\n nlev_fkl = 10800. / np.sqrt(np.sum(self.n_inv) / (4.0 * np.pi)) / np.pi\n self.nlev_fkl = nlev_fkl\n print(\"ninv_fkl: using %.2e uK-amin noise Cl\"%self.nlev_fkl)\n\n def hashdict(self):\n return {'n_inv': clhash(self.n_inv),\n 'b_transf': clhash(self.b_transf),\n 'marge_monopole': self.marge_monopole,\n 'marge_dipole': self.marge_dipole,\n 'templates_hash': self.templates_hash,\n 'marge_uptolmin': self.marge_uptolmin}\n\n def get_fkl(self):\n return self.b_transf ** 2 / (self.nlev_fkl / 60. /180. * np.pi) ** 2\n\n\n def degrade(self, nside):\n \"\"\"Missing doc. \"\"\"\n if nside == hp.npix2nside(len(self.n_inv)):\n return self\n else:\n print(\"DEGRADING WITH NO MARGE MAPS\")\n marge_maps = []\n return alm_filter_ninv(hp.ud_grade(self.n_inv, nside, power=-2), self.b_transf,\n marge_monopole=self.marge_monopole, marge_dipole= self.marge_dipole,\n marge_uptolmin=self.marge_uptolmin, marge_maps=marge_maps)\n\n def apply_alm(self, alm):\n \"\"\"Missing doc. \"\"\"\n npix = len(self.n_inv)\n hp.almxfl(alm, self.b_transf, inplace=True)\n kmap = alm2map(alm, hp.npix2nside(npix), verbose=False)\n self.apply_map(kmap)\n alm[:] = map2alm(kmap, lmax=hp.Alm.getlmax(alm.size), iter=0)\n hp.almxfl(alm, self.b_transf * (npix / (4. * np.pi)), inplace=True)\n\n\n def apply_map(self, kmap):\n \"\"\"Missing doc. 
\"\"\"\n kmap *= self.n_inv\n if len(self.templates) != 0:\n coeffs = np.concatenate(([t.dot(kmap) for t in self.templates]))\n coeffs = np.dot(self.Pt_Nn1_P_inv, coeffs)\n pmodes = np.zeros(len(self.n_inv))\n im = 0\n for t in self.templates:\n t.accum(pmodes, coeffs[im:(im + t.nmodes)])\n im += t.nmodes\n pmodes *= self.n_inv\n kmap -= pmodes"
]
| [
[
"numpy.zeros_like",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"numpy.copy",
"numpy.linalg.eigh",
"numpy.where",
"numpy.arange",
"numpy.transpose",
"numpy.all",
"numpy.diag"
]
]
|
swaroop9ai9/Fintech | [
"a101092634bc04fdc56feb36f54552960ae64767"
]
| [
"stock_tweet_polarity/stock_tweet_polarity.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"Stock Tweet Polarity\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1y7mnum01SwBEvcCXkV4rpLAJpX9WTLXQ\n\nCreated on Sun June 7 20:54:36 2020\n@author: Sm\n\"\"\"\n\nfrom google.colab import drive\nfrom google.colab import files\ndrive.mount('/content/gdrive')\n\nclass Tweet(object):\n def __init__(self, content, polarity):\n self.content = content\n self.polarity = polarity\n\n#!pip3 install nltk\nimport nltk\nnltk.download('punkt')\n\n# Stock investment recommendations based on Machine Learning predictions from last year's values of any market symbol \n# and also based on Twitter sentiment analysis from retrieved tweets containing the symbol as text inside of it.\n#!pip3 install tweepy\n#!pip3 install textblob\n#!pip3 install tweet\n#!pip3 install fix_yahoo_finance\n#!pip3 install constants\n#!pip3 install nsepy\nimport datetime as dt\nimport math\n\nimport fix_yahoo_finance as yf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tweepy\nfrom matplotlib import style\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom textblob import TextBlob\n# import nsepy\n\nstyle.use('ggplot')\n\nconsumer_key = 'fICIFmImYjp8Bvv7YoHHFe08G'\nconsumer_secret = 'DwOHmSgPje3QSFcJCj8uiqyVCRuLzvXe7cwGb1CGu4LDcCncfw'\naccess_token = '1249705548724842497-zFKFrZrHAqZiEP0ImPrubYhO9Ztrqw'\naccess_token_secret = 'XUYDqOb0JARbTVGM7TNsY0adSSXDJKlU0w48BO3bPvJSH'\n\nnum_of_tweets = 1000\n\n'''\nTo check if the stock symbol introduced via prompt is valid (or if it exists), we can check it using 'companylist.csv'.\n'''\ndef check_stock_symbol(flag=False, companies_file='companylist.csv'):\n df = pd.read_csv('gdrive/My Drive/DATA/company_list/companylist.csv', usecols=[0])\n dfind = pd.read_csv('gdrive/My Drive/DATA/company_list/ind_nifty500list.csv')\n while flag is False:\n symbol = input('Enter a stock symbol to retrieve data from: ').upper()\n if symbol in df['Symbol']:\n print('yes')\n for index in range(len(df)):\n if df['Symbol'][index] == symbol:\n flag = True\n for index in range(len(dfind)):\n if dfind['Symbol'][index] == symbol:\n flag = True\n return flag, symbol\n\n'''\nNow, the next step is to create the Pandas DataFrame of the introduced symbol stock market values from the last year from now\n'''\ndef get_stock_data(symbol, from_date, to_date):\n data = yf.download(symbol, start=from_date, end=to_date)\n df = pd.DataFrame(data=data)\n\n df = df[['Open', 'High', 'Low', 'Close', 'Volume']]\n df['HighLoad'] = (df['High'] - df['Close']) / df['Close'] * 100.0\n df['Change'] = (df['Close'] - df['Open']) / df['Open'] * 100.0\n\n df = df[['Close', 'HighLoad', 'Change', 'Volume']]\n return df\n\n''' Splitting the Dataset and using Linear Regression to predict'''\ndef stock_forecasting(df):\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.1*len(df)))\n df['Label'] = df[[forecast_col]].shift(-forecast_out)\n\n X = np.array(df.drop(['Label'], axis=1))\n X = preprocessing.scale(X)\n X_forecast = X[-forecast_out:]\n X = X[:-forecast_out]\n\n df.dropna(inplace=True)\n y = np.array(df['Label'])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7)\n\n clf = LinearRegression(n_jobs=-1)\n clf.fit(X_train, y_train)\n accuracy = clf.score(X_test, y_test)\n forecast = clf.predict(X_forecast)\n\n df['Prediction'] = np.nan\n\n last_date = df.iloc[-1].name\n last_date = 
dt.datetime.strptime(str(last_date), \"%Y-%m-%d %H:%M:%S\")\n\n for pred in forecast:\n last_date += dt.timedelta(days=1)\n df.loc[last_date.strftime(\"%Y-%m-%d\")] = [np.nan for _ in range(len(df.columns) - 1)] + [pred]\n return df, forecast_out\n\ndef forecast_plot(df):\n plt.figure(figsize=(40,15))\n df['Close'].plot(color='black')\n df['Prediction'].plot(color='green')\n plt.legend(loc=4)\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.show()\n\n'''\n the Twitter sentiment analysis starts and it retrieves a list of the last 10000 tweets posted in english containing the\n symbol introduced and they are later stored in a list of Tweet class, defined in Tweet.py with the tweet's text and polarity \n from TextBlob. '''\ndef retrieving_tweets_polarity(symbol):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n user = tweepy.API(auth)\n tweets = tweepy.Cursor(user.search, q=str(symbol), tweet_mode='extended', lang='en').items(num_of_tweets)\n\n tweet_list = []\n global_polarity = 0\n for tweet in tweets:\n tw = tweet.full_text\n blob = TextBlob(tw)\n polarity = 0\n for sentence in blob.sentences:\n polarity += sentence.sentiment.polarity\n global_polarity += sentence.sentiment.polarity\n tweet_list.append(Tweet(tw, polarity))\n\n global_polarity = global_polarity / len(tweet_list)\n return global_polarity\n\ndef recommending(df, forecast_out, global_polarity):\n if df.iloc[-forecast_out-1]['Close'] < df.iloc[-1]['Prediction']:\n if global_polarity > 0:\n print(\"According to the predictions and twitter sentiment analysis -> Investing in %s is a GREAT idea!\" % str(symbol))\n elif global_polarity < 0:\n print(\"According to the predictions and twitter sentiment analysis -> Investing in %s is a BAD idea!\" % str(symbol))\n else:\n print(\"According to the predictions and twitter sentiment analysis -> Investing in %s is a BAD idea!\" % str(symbol))\n\n# By default the program takes today's price and last one year stock price.\nif __name__ == \"__main__\":\n (flag, symbol) = check_stock_symbol(False, 'companylist.csv')\n if flag:\n actual_date = dt.date.today()\n past_date = actual_date - dt.timedelta(days=365 * 3)\n actual_date = actual_date.strftime(\"%Y-%m-%d\")\n past_date = past_date.strftime(\"%Y-%m-%d\")\n print(\"Retrieving Stock Data from introduced symbol...\")\n dataframe = get_stock_data(symbol, past_date, actual_date)\n print(\"Forecasting stock DataFrame...\")\n (dataframe, forecast_out) = stock_forecasting(dataframe)\n print(\"Plotting existing and forecasted values...\")\n forecast_plot(dataframe)\n print(\"Retrieving %s related tweets polarity...\" % symbol)\n polarity = retrieving_tweets_polarity(symbol)\n print(\"Generating recommendation based on prediction & polarity...\")\n recommending(dataframe, forecast_out, polarity)\n\n"
]
| [
[
"numpy.array",
"matplotlib.style.use",
"sklearn.linear_model.LinearRegression",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
DanielDls-exe/mid-project-euro2020 | [
"1eb120b9d19aaf4578cf9b57d0a151de2577e0fc"
]
| [
"streamlit/main.py"
]
| [
"from nbformat import write\r\nimport streamlit as st\r\nfrom data.get_data import get_all_name_teams, get_team, get_stadistic_team, get_all_name_players, get_player\r\nimport pages\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport plotly.express as px\r\nfrom pages.compare_teams import compare_teams\r\nfrom pages.compare_players import compare_players\r\nfrom pages.search_player import search_player\r\nfrom pages.search_team import search_team\r\n\r\nselect = st.sidebar.selectbox(\"Select page\", [\"Home\", \"Search Team for stadistic\" , \"Search Player for stadistic\", \"Compare Teams\", \"Compare Players\"])\r\n\r\ndf = pd.DataFrame()\r\nteam_fixed = {}\r\nplayer_fixed = {}\r\ndf_1 = pd.DataFrame()\r\n\r\nif select == \"Home\":\r\n st.title('Welcome to Euro 2020 Data Analysis')\r\n st.image('euro2020.jpg')\r\n name_teams = get_all_name_teams()\r\n team = st.selectbox('Select a team to view its statistics', name_teams)\r\n stadistic_team = get_team(team)\r\n team_fixed['Team'] = stadistic_team['team']\r\n team_fixed['Possession'] = stadistic_team['possession_total']\r\n team_fixed['Goals'] = stadistic_team['goals_favor']\r\n team_fixed['Goals received'] = stadistic_team['goals_received']\r\n team_fixed['Penalty Goals'] = stadistic_team['penaltys_total']\r\n team_fixed['Shots'] = stadistic_team['shots']\r\n df_team = df.append(team_fixed, ignore_index=True)\r\n df_team.index = ['Data']\r\n st.dataframe(df_team)\r\n name_players = get_all_name_players()\r\n player = st.selectbox('Select a player to view its statistics', name_players)\r\n stadistic_player = get_player(player)\r\n player_fixed['Player'] = stadistic_player['name']\r\n player_fixed['Goals'] = stadistic_player['goals']\r\n player_fixed['Assistance'] = stadistic_player['assistance']\r\n player_fixed['Yellow cards'] = stadistic_player['yellow_cards']\r\n player_fixed['Red cards'] = stadistic_player['red_cards']\r\n df_player = df_1.append(player_fixed, ignore_index=True)\r\n df_player.index = ['Data']\r\n st.dataframe(df_player)\r\n \r\nif select == \"Search Team for stadistic\":\r\n search_team()\r\n\r\nif select == \"Search Player for stadistic\":\r\n search_player()\r\n\r\nif select == \"Compare Teams\":\r\n compare_teams()\r\nif select == \"Compare Players\":\r\n compare_players()\r\n"
]
| [
[
"pandas.DataFrame"
]
]
|
williamFalcon/torchbearer | [
"1d30401468f147bfcecb611f37ab3139beaa3649"
]
| [
"tests/callbacks/test_cutout.py"
]
| [
"from unittest import TestCase\n\nimport torch\n\nimport torchbearer\nfrom torchbearer.callbacks.cutout import Cutout, RandomErase, CutMix\n\n\nclass TestCutOut(TestCase):\n def test_cutout(self):\n random_image = torch.rand(2, 3, 100, 100)\n co = Cutout(1, 10, seed=7)\n state = {torchbearer.X: random_image}\n co.on_sample(state)\n reg_img = state[torchbearer.X].view(-1)\n\n x = [21, 86]\n y = [15, 92]\n\n known_cut = random_image.clone().numpy()\n known_cut[0, :, y[0]-10//2:y[0]+10//2, x[0]-10//2:x[0]+10//2] = 0\n known_cut[1, :, y[1]-10//2:y[1]+10//2, x[1]-10//2:x[1]+10//2] = 0\n known_cut = torch.from_numpy(known_cut)\n known_cut = known_cut.view(-1)\n\n diff = (torch.abs(known_cut-reg_img) > 1e-4).any()\n self.assertTrue(diff.item() == 0)\n\n def test_cutout_constant(self):\n random_image = torch.rand(2, 3, 100, 100)\n co = Cutout(1, 10, constant=0.5, seed=7)\n state = {torchbearer.X: random_image}\n co.on_sample(state)\n reg_img = state[torchbearer.X].view(-1)\n\n x = [21, 86]\n y = [15, 92]\n\n known_cut = random_image.clone().numpy()\n known_cut[0, :, y[0]-10//2:y[0]+10//2, x[0]-10//2:x[0]+10//2] = 0.5\n known_cut[1, :, y[1]-10//2:y[1]+10//2, x[1]-10//2:x[1]+10//2] = 0.5\n known_cut = torch.from_numpy(known_cut)\n known_cut = known_cut.view(-1)\n\n diff = (torch.abs(known_cut-reg_img) > 1e-4).any()\n self.assertTrue(diff.item() == 0)\n\n # TODO: Find a better test for this\n def test_random_erase(self):\n random_image = torch.rand(2, 3, 100, 100)\n co = RandomErase(1, 10, seed=7)\n state = {torchbearer.X: random_image}\n co.on_sample(state)\n reg_img = state[torchbearer.X].view(-1)\n\n x = [21, 86]\n y = [15, 92]\n\n known_cut = random_image.clone().numpy()\n known_cut[0, :, y[0]-10//2:y[0]+10//2, x[0]-10//2:x[0]+10//2] = 0\n known_cut[1, :, y[1]-10//2:y[1]+10//2, x[1]-10//2:x[1]+10//2] = 0\n known_cut = torch.from_numpy(known_cut)\n\n known_cut = known_cut.view(-1)\n masked_pix = known_cut == 0\n\n diff = (torch.abs(known_cut[masked_pix]-reg_img[masked_pix]) > 1e-4).any()\n self.assertTrue(diff.item() > 0)\n\n def test_cutmix(self):\n random_image = torch.rand(5, 3, 100, 100)\n state = {torchbearer.X: random_image, torchbearer.Y_TRUE: torch.randint(10, (5,)).long(), torchbearer.DEVICE: 'cpu'}\n co = CutMix(0.25, classes=10, seed=7)\n co.on_sample(state)\n reg_img = state[torchbearer.X].view(-1)\n\n x = [72, 83, 18, 96, 40]\n y = [8, 17, 62, 30, 66]\n perm = [0, 4, 3, 2, 1]\n sz = 3\n\n rnd = random_image.clone().numpy()\n known_cut = random_image.clone().numpy()\n known_cut[0, :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2] = rnd[perm[0], :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2]\n known_cut[1, :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2] = rnd[perm[1], :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2]\n known_cut[2, :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2] = rnd[perm[2], :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2]\n known_cut[3, :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2] = rnd[perm[3], :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2]\n known_cut[4, :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2] = rnd[perm[4], :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2]\n known_cut = torch.from_numpy(known_cut)\n known_cut = known_cut.view(-1)\n\n diff = (torch.abs(known_cut-reg_img) > 1e-4).any()\n self.assertTrue(diff.item() == 0)\n\n def test_cutmix_targets(self):\n random_image = torch.rand(2, 3, 100, 100)\n co = CutMix(1.0, classes=4, seed=7)\n target = torch.tensor([\n [0., 1., 0., 0.],\n [0., 0., 0., 1.]\n ])\n state = {torchbearer.X: random_image, 
torchbearer.Y_TRUE: torch.tensor([1, 3]).long(), torchbearer.DEVICE: 'cpu'}\n co.on_sample(state)\n self.assertTrue(((state[torchbearer.TARGET] - target).abs() < 0.00001).all())\n state = {torchbearer.X: random_image, torchbearer.Y_TRUE: torch.tensor([1, 3]).long()}\n co.on_sample_validation(state)\n self.assertTrue(((state[torchbearer.TARGET] - target).abs() < 0.00001).all())\n state = {torchbearer.X: random_image, torchbearer.Y_TRUE: target.long()}\n co.on_sample_validation(state)\n self.assertTrue(((state[torchbearer.TARGET] - target).abs() < 0.00001).all())\n"
]
| [
[
"torch.rand",
"torch.from_numpy",
"torch.abs",
"torch.randint",
"torch.tensor"
]
]
|
vvviet2908/flowrec | [
"b5077421a187ee7952289c5a39abf65ec041a86d"
]
| [
"recommendation/efdt_wrapper.py"
]
| [
"from skmultiflow.core import BaseSKMObject, ClassifierMixin\r\nfrom skmultiflow.utils import get_dimensions\r\nfrom collections import deque\r\nfrom collections import defaultdict\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom skmultiflow.trees import HATT\r\nfrom utils.shared_data import SharedData as Data\r\n\r\n\r\nclass EFDTWrapper(BaseSKMObject, ClassifierMixin):\r\n \"\"\"Wrapper for the Hoeffding Tree learner of Scikit-Multiflow\r\n Notes\r\n ----------\r\n Provides recommendation interface for the HATT class\r\n Internally represented as a decision stump with Naive Bayes at the leaves\r\n References\r\n ----------\r\n \"FlowRec: Prototyping Session-based Recommender Systems in Streaming Mode\"\r\n Parameters\r\n ----------\r\n estimator: BaseSKMObject (default=HATT)\r\n Estimator to wrap.\r\n weight_mc: float (default=10)\r\n Weight of the sequence 'previous item -> current item' (1st order Markov Chain)\r\n weight_inv: float (default=0.3)\r\n Weight of inverse sequences 'current item -> other item'\r\n Used as a fixed penalty for inverse sequences\r\n max_session_size: int (default=20)\r\n Cutoff for the session size. Used to filter out very long sessions.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n estimator=HATT(leaf_prediction='nba'),\r\n weight_mc=10,\r\n weight_inv=0.3,\r\n max_session_size=20\r\n ):\r\n super().__init__()\r\n self.ht = estimator\r\n self.w_mc = weight_mc\r\n self.w_inv = weight_inv\r\n self.counter = Counter()\r\n self.max_session_size = max_session_size\r\n self._rec_tracker = defaultdict(list)\r\n\r\n def configure(self, **kwargs):\r\n self.ht.classes = list(range(len(Data.classes)))\r\n self.ht.set_params(nominal_attributes=[0])\r\n self.ht.partial_fit(np.array([[-1]]), np.array([0]))\r\n\r\n def partial_fit(self, X, y, classes=None, sample_weight=None):\r\n if y is not None:\r\n row_cnt, _ = get_dimensions(X)\r\n for i in range(row_cnt):\r\n y_idx = np.searchsorted(Data.classes, y)\r\n if Data.session_vector is not None:\r\n session_vector = Data.session_vector[-self.max_session_size:]\r\n for pos, y_o_idx in enumerate(session_vector):\r\n if y_o_idx == session_vector[-1]:\r\n w = self.w_mc\r\n else:\r\n w = 1 / (len(session_vector) - pos)\r\n self.ht.partial_fit(np.array([[y_o_idx]]),\r\n y_idx, sample_weight=[w])\r\n # inverse fit\r\n self.ht.partial_fit(np.array([y_idx]),\r\n np.array([y_o_idx]),\r\n sample_weight=[w * self.w_inv])\r\n\r\n def predict(self, X):\r\n predictions = deque()\r\n r, _ = get_dimensions(X)\r\n y_proba = np.zeros((r, len(Data.classes)))\r\n for i in range(r):\r\n session_vector = Data.session_vector[-self.max_session_size:]\r\n for pos, y_o_idx in enumerate(session_vector):\r\n weight = self.w_mc if y_o_idx == session_vector[-1] else 1\r\n y_proba_current = self.ht.predict_proba(np.array([[y_o_idx]]))\r\n y_proba_current *= weight / (len(session_vector) - pos)\r\n y_proba += y_proba_current\r\n y_proba[i][Data.session_vector[-1]] = 0.0\r\n nonzero = np.flatnonzero(y_proba[i])\r\n if len(nonzero > 0):\r\n sorted_desc = np.argsort(y_proba[i][nonzero])[::-1]\r\n sorted_ids = nonzero[sorted_desc]\r\n if not Data.allow_reminders:\r\n sorted_ids = sorted_ids[~np.isin(sorted_ids, Data.session_vector)]\r\n if not Data.allow_repeated:\r\n session = X[i, Data.sid]\r\n sorted_ids = sorted_ids[~np.isin(sorted_ids, self._rec_tracker[session])]\r\n self._rec_tracker[session].extend(sorted_ids[:Data.rec_size])\r\n y_pred = Data.classes[sorted_ids[:Data.rec_size]]\r\n else:\r\n y_pred = np.array([])\r\n 
predictions.append(y_pred)\r\n return np.array(predictions)\r\n\r\n def predict_proba(self, X):\r\n \"\"\"Not implemented for this method.\"\"\"\r\n raise NotImplementedError"
]
| [
[
"numpy.array",
"numpy.flatnonzero",
"numpy.argsort",
"numpy.searchsorted",
"numpy.isin"
]
]
|
bench-os/bench-os | [
"38ade08e097ca215f7465047dfa70503af11d612"
]
| [
"scripts/pslab_context_switching.py"
]
| [
"\"\"\"\nThis script retrieves the context switching time between two tasks.\n\"\"\"\nimport serial\nimport signal\nimport _thread\nimport time\nimport matplotlib.pyplot as plt\nimport sys\nfrom PSL import sciencelab\n\nBENCH_CONTEXT_SWITCHING_FLAG = '[BENCH_CONTEXT_SWITCHING]' \nSER = serial.Serial('/dev/ttyUSB0', 115200)\n\nI = sciencelab.connect()\nVALUES = []\nTASKS = {}\n\ndef show_results():\n global VALUES\n print()\n print(\"{} context switchs\".format(len(VALUES)))\n print(\"{} tasks\".format(len(TASKS)))\n for key in TASKS:\n print(\"task {} with {} runs of context switching times\".format(key, TASKS[key]))\n\n for _ in range(int(len(VALUES) * 0.05)):\n VALUES.remove(max(VALUES)) \n VALUES.remove(min(VALUES))\n print(\"Mean: {} usec\".format((sum(VALUES)/len(VALUES))*10**6))\n print(\"Min {} usec\".format(min(VALUES)*10**6))\n print(\"Max {} usec\".format(max(VALUES)*10**6))\n\n\n VALUES = sorted(VALUES)\n intervals = []\n dic = {}\n for v in VALUES:\n v = v*10**6\n intervals.append(v)\n dic[v] = dic.get(v, 0) + 1\n\n print(intervals)\n plt.hist(intervals, bins=3000)\n #plt.bar(dic.keys(), dic.values())\n #plt.axis([10,20,0,500])\n plt.show()\n\n sys.exit()\n\ndef read_uart(threadName):\n while True:\n line = str(SER.readline(), 'utf-8')\n if BENCH_CONTEXT_SWITCHING_FLAG in line:\n if \"Start\" in line:\n size = int(line.split()[2])\n for _ in range(size):\n task = str(SER.readline(), 'utf-8').rstrip()\n TASKS[task] = TASKS.get(task, 0) + 1\n else:\n pass\n #print(\"[UART] {}\".format(line), end=\"\")\n\n\nready = False\nwhile not ready:\n try:\n line = str(SER.readline(), 'utf-8')\n #print(\"[UART] {}\".format(line), end=\"\")\n if BENCH_CONTEXT_SWITCHING_FLAG in line:\n if \"Ready\" in line:\n SER.write(bytes(\"{} Ready\\n\".format(BENCH_CONTEXT_SWITCHING_FLAG), 'utf-8'))\n ready = True\n except:\n pass\n\nprint('Ready signal received')\n_thread.start_new_thread(read_uart, (\"READ_UART\",))\n#signal.signal(signal.SIGINT, hand_inter)\n\nwhile True:\n CS_TIME = I.MeasureInterval('ID1', 'ID1', 'falling', 'rising')\n\n if CS_TIME > 0:\n #print(CS_TIME*10**6)\n VALUES.append(CS_TIME)\n\n if len(VALUES) == 1000 * 1.1:\n show_results()\n\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
]
|
ShivamShrirao/contrastive-unpaired-translation | [
"e81611a5bd8b7aee6aedab10aadf9e22a0804a63"
]
| [
"models/custom_unet.py"
]
| [
"import numpy as np\nimport functools\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils.parametrizations import spectral_norm\nfrom torchvision.models import vgg16_bn\nfrom torchvision.models.feature_extraction import create_feature_extractor\n\n\ndef icnr_init(x, scale=2, init=nn.init.kaiming_normal_):\n ni, nf, h, w = x.shape\n ni2 = int(ni / (scale**2))\n k = init(x.new_zeros([ni2, nf, h, w])).transpose(0, 1)\n k = k.contiguous().view(ni2, nf, -1)\n k = k.repeat(1, 1, scale**2)\n return k.contiguous().view([nf, ni, h, w]).transpose(0, 1)\n\n\nclass ConvNorm(nn.Module):\n def __init__(self, ni, nf, ks=3, stride=1, padding=None, groups=1, bias=None, bn=True, bn_zero=False,\n act_cls=nn.ReLU, norm_lyr=nn.BatchNorm2d, spectral=False, icnr=False):\n super().__init__()\n if padding is None:\n padding = 'same' if stride == 1 else int(np.ceil((ks - 1) / 2))\n if bias is None:\n bias = not bn\n while ni % groups:\n groups //= 2\n while nf % groups:\n groups //= 2\n self.conv = nn.Conv2d(ni, nf, ks, stride, padding, groups=groups, bias=bias)\n if icnr:\n self.conv.weight.data.copy_(icnr_init(self.conv.weight.data))\n self.conv.bias.data.zero_()\n if spectral:\n self.conv = spectral_norm(self.conv)\n if bn:\n self.bn = norm_lyr(nf)\n if bn_zero and norm_lyr is nn.BatchNorm2d:\n self.bn.weight.data.fill_(0.)\n else:\n self.bn = nn.Identity()\n if act_cls is None:\n self.act = nn.Identity()\n else:\n self.act = act_cls(inplace=True) if act_cls is nn.ReLU else act_cls()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x\n\n\nclass ResBlock(nn.Module):\n def __init__(self, ni, nf, ks=3, stride=1, groups=1, reduction=0, spectral=False,\n act_cls=nn.ReLU, self_attn=False, norm_lyr=nn.BatchNorm2d):\n super().__init__()\n self.conv1 = ConvNorm(ni, nf, ks, stride, groups=groups, act_cls=act_cls, spectral=spectral,\n norm_lyr=norm_lyr)\n self.conv2 = ConvNorm(nf, nf, ks, groups=1, act_cls=None, spectral=spectral, bn_zero=True,\n norm_lyr=norm_lyr)\n self.act = act_cls(inplace=True) if act_cls is nn.ReLU else act_cls()\n\n shortcut = []\n if ni != nf:\n shortcut.append(ConvNorm(ni, nf, 1, act_cls=nn.Identity, norm_lyr=norm_lyr))\n if stride > 1:\n shortcut.append(nn.MaxPool2d(stride))\n self.shortcut = nn.Sequential(*shortcut)\n\n if self_attn:\n self.atn = SelfAttention(nf)\n elif reduction:\n self.atn = SqueezeExcite(nf, reduction)\n else:\n self.atn = nn.Identity()\n\n def forward(self, x):\n inp = x\n x = self.conv2(self.conv1(x))\n x = self.atn(x)\n return self.act(x.add_(self.shortcut(inp)))\n\n\nclass UnetBlock(nn.Module):\n def __init__(self, ni, nf, skip_in, blur=True, act_cls=nn.ReLU, groups=1,\n self_attn=False, reduction=0, spectral=False, norm_lyr=nn.BatchNorm2d):\n super().__init__()\n self.pix_shuf = PixelShuffle_ICNR(ni, ni // 2, blur=blur, act_cls=act_cls,\n spectral=spectral, norm_lyr=norm_lyr)\n rin = ni // 2 + skip_in\n self.resb = ResBlock(rin, nf, groups=groups, reduction=reduction, spectral=spectral,\n act_cls=act_cls, self_attn=self_attn, norm_lyr=norm_lyr)\n\n def forward(self, x, skip=None):\n x = self.pix_shuf(x)\n # x = F.interpolate(x, skip.shape[-2:], mode='nearest')\n if skip is not None:\n x = torch.cat([x, skip], dim=1)\n return self.resb(x)\n\n\nclass PixelShuffle_ICNR(nn.Sequential):\n def __init__(self, ni, nf, scale=2, blur=True, act_cls=nn.ReLU, spectral=False, norm_lyr=nn.BatchNorm2d):\n super().__init__()\n layers = [ConvNorm(ni, nf * (scale**2), ks=1, bn=False, act_cls=act_cls, 
spectral=spectral,\n icnr=True, norm_lyr=norm_lyr),\n nn.PixelShuffle(scale)]\n if blur:\n layers += [nn.ReplicationPad2d((1, 0, 1, 0)), nn.AvgPool2d(2, stride=1)]\n super().__init__(*layers)\n\n\nclass SqueezeExcite(nn.Module):\n def __init__(self, ch, reduction, act_cls=nn.ReLU) -> None:\n super().__init__()\n nf = ch // reduction\n self.sq = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n ConvNorm(ch, nf, ks=1, bn=False, act_cls=act_cls),\n ConvNorm(nf, ch, ks=1, bn=False, act_cls=nn.Sigmoid)\n )\n\n def forward(self, x):\n return x * self.sq(x)\n\n\nclass SelfAttention(nn.Module):\n def __init__(self, n_channels):\n super().__init__()\n self.qkv_c = (n_channels // 8, n_channels // 8, n_channels)\n self.to_qkv = spectral_norm(nn.Conv2d(n_channels, sum(self.qkv_c), kernel_size=1, bias=False))\n self.gamma = nn.Parameter(torch.tensor([0.]))\n\n def forward(self, x): # [B, C, H, W]\n size = x.size()\n qkv = self.to_qkv(x)\n q, k, v = qkv.flatten(2).split(self.qkv_c, dim=1) # [B, (dq,dk,dv), H*W]\n attn = F.softmax(torch.bmm(q.transpose(1, 2), k), dim=1) # [B, lq, lk]\n o = torch.bmm(v, attn)\n del attn, q, k, v, qkv\n o = o.view(*size) # .contiguous()\n o = o.mul_(self.gamma) + x\n return o\n\n\nclass Unet(nn.Module):\n def __init__(self, in_c=3, out_c=3, ngf=32, num_scale=1, groups=32, reduction=16, spectral=True,\n self_attn=False, norm_lyr=nn.InstanceNorm2d):\n super().__init__()\n self.conv_in = ConvNorm(in_c, ngf, ks=3, norm_lyr=norm_lyr, act_cls=nn.ReLU)\n kwargs = dict(groups=groups, reduction=reduction, spectral=spectral, norm_lyr=norm_lyr)\n self.down = self.get_block(ngf, 64, num=1, **kwargs)\n self.down0 = self.get_block(64, 96, num=1, **kwargs)\n self.down1 = self.get_block(96, 128, num=1, self_attn=self_attn, **kwargs)\n self.down2 = self.get_block(128, 256, num=1, **kwargs)\n self.down3 = self.get_block(256, 512, num=1, **kwargs)\n\n self.middle_conv = nn.Sequential() # ConvNorm(512, 1024, spectral=spectral, norm_lyr=norm_lyr,\n # act_cls=nn.ReLU),\n # ConvNorm(1024, 512, spectral=spectral, norm_lyr=norm_lyr,\n # act_cls=nn.ReLU),\n # )\n\n self.up3 = UnetBlock(512, 256, 256, **kwargs)\n self.up2 = UnetBlock(256, 128, 128, **kwargs)\n self.up1 = UnetBlock(128, 96, 96, **kwargs)\n self.up0 = UnetBlock(96, 64, 64, **kwargs)\n self.up = UnetBlock(64, ngf, ngf, **kwargs)\n\n n_up = (ngf, 64, 96, 128, 256, 512)\n self.deep_convs = nn.ModuleList([nn.Conv2d(n_up[i], out_c, kernel_size=3 if i == 0 else 1,\n padding='same') for i in range(num_scale)])\n\n def forward(self, x, get_feat=False, encode_only=False): # 3, 768\n x = self.conv_in(x) # 32, 768\n d = self.down(x) # 64, 384\n d0 = self.down0(d) # 96, 192\n d1 = self.down1(d0) # 128, 96\n d2 = self.down2(d1) # 256, 48\n d3 = self.down3(d2) # 512, 24\n\n u3 = self.middle_conv(d3) # 512, 24\n\n u2 = self.up3(u3, d2) # 256, 48\n u1 = self.up2(u2, d1) # 128, 96\n u0 = self.up1(u1, d0) # 96, 192\n u = self.up0(u0, d) # 64, 384\n o = self.up(u, x) # 32, 768\n if get_feat:\n feats = x, d0, d1, d2, d3\n if encode_only:\n return feats\n else:\n return torch.tanh(self.deep_convs[0](o)), feats\n return torch.tanh(self.deep_convs[0](o))\n\n def get_block(self, ni, nf, num=2, self_attn=False, **kwargs):\n return nn.Sequential(*[ResBlock(ni if i == 0 else nf, nf, stride=2 if i == 0 else 1,\n self_attn=self_attn if i == 0 else False, **kwargs)\n for i in range(num)])\n\n\nclass PatchSampleF(nn.Module):\n def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):\n # potential issues: currently, we use the same 
patch_ids for multiple images in the batch\n super(PatchSampleF, self).__init__()\n self.l2norm = Normalize(2)\n self.nc = nc # hard-coded\n self.mlp_init = False\n self.init_type = init_type\n self.init_gain = init_gain\n self.gpu_ids = gpu_ids\n self.use_mlp = use_mlp\n\n def create_mlp(self, feats):\n for mlp_id, feat in enumerate(feats):\n input_nc = feat.shape[1]\n mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])\n setattr(self, 'mlp_%d' % mlp_id, mlp)\n self.mlp_init = True\n\n def forward(self, feats, num_patches=256, patch_ids=None):\n return_ids = []\n return_feats = []\n if self.use_mlp and not self.mlp_init:\n self.create_mlp(feats)\n for feat_id, feat in enumerate(feats):\n B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]\n feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)\n if num_patches > 0:\n if patch_ids is not None:\n patch_id = patch_ids[feat_id]\n else:\n # torch.randperm produces cudaErrorIllegalAddress for newer versions of PyTorch. https://github.com/taesungp/contrastive-unpaired-translation/issues/83\n patch_id = torch.randperm(feat_reshape.shape[1], dtype=torch.long, device=feat.device)\n # patch_id = np.random.permutation(feat_reshape.shape[1])\n patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)\n # patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)\n x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])\n else:\n x_sample = feat_reshape\n patch_id = []\n if self.use_mlp:\n mlp = getattr(self, 'mlp_%d' % feat_id)\n x_sample = mlp(x_sample)\n if isinstance(patch_id, np.ndarray):\n patch_id = torch.tensor(patch_id, device=feats[0].device)\n return_ids.append(patch_id)\n x_sample = self.l2norm(x_sample)\n\n if num_patches == 0:\n x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])\n return_feats.append(x_sample)\n return return_feats, return_ids\n\n\nclass PatchNCELoss(nn.Module):\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n self.cross_entropy_loss = torch.nn.CrossEntropyLoss()\n self.mask_dtype = torch.bool\n\n def forward(self, feat_q, feat_k):\n num_patches = feat_q.shape[0]\n dim = feat_q.shape[1]\n feat_k = feat_k.detach()\n\n # pos logit\n l_pos = torch.bmm(\n feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1))\n l_pos = l_pos.view(num_patches, 1)\n\n # neg logit\n\n # Should the negatives from the other samples of a minibatch be utilized?\n # In CUT and FastCUT, we found that it's best to only include negatives\n # from the same image. 
Therefore, we set\n # --nce_includes_all_negatives_from_minibatch as False\n # However, for single-image translation, the minibatch consists of\n # crops from the \"same\" high-resolution image.\n # Therefore, we will include the negatives from the entire minibatch.\n if self.opt.nce_includes_all_negatives_from_minibatch:\n # reshape features as if they are all negatives of minibatch of size 1.\n batch_dim_for_bmm = 1\n else:\n batch_dim_for_bmm = self.opt.batch_size\n\n # reshape features to batch size\n feat_q = feat_q.view(batch_dim_for_bmm, -1, dim)\n feat_k = feat_k.view(batch_dim_for_bmm, -1, dim)\n npatches = feat_q.size(1)\n l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))\n\n # diagonal entries are similarity between same features, and hence meaningless.\n # just fill the diagonal with very small number, which is exp(-10) and almost zero\n diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]\n l_neg_curbatch.masked_fill_(diagonal, -10.0)\n l_neg = l_neg_curbatch.view(-1, npatches)\n\n out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T\n loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long, device=feat_q.device))\n return loss\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):\n \"\"\"Construct a PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.model(input)\n\n\nclass Downsample(nn.Module):\n def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):\n super(Downsample, self).__init__()\n self.filt_size = filt_size\n self.pad_off = pad_off\n self.pad_sizes = [int(1. 
* (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]\n self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]\n self.stride = stride\n self.off = int((self.stride - 1) / 2.)\n self.channels = channels\n\n filt = get_filter(filt_size=self.filt_size)\n self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))\n\n self.pad = get_pad_layer(pad_type)(self.pad_sizes)\n\n def forward(self, inp):\n if(self.filt_size == 1):\n if(self.pad_off == 0):\n return inp[:, :, ::self.stride, ::self.stride]\n else:\n return self.pad(inp)[:, :, ::self.stride, ::self.stride]\n else:\n return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])\n\n\ndef get_filter(filt_size=3):\n if(filt_size == 1):\n a = np.array([1., ])\n elif(filt_size == 2):\n a = np.array([1., 1.])\n elif(filt_size == 3):\n a = np.array([1., 2., 1.])\n elif(filt_size == 4):\n a = np.array([1., 3., 3., 1.])\n elif(filt_size == 5):\n a = np.array([1., 4., 6., 4., 1.])\n elif(filt_size == 6):\n a = np.array([1., 5., 10., 10., 5., 1.])\n elif(filt_size == 7):\n a = np.array([1., 6., 15., 20., 15., 6., 1.])\n\n filt = torch.Tensor(a[:, None] * a[None, :])\n filt = filt / torch.sum(filt)\n return filt\n\ndef get_pad_layer(pad_type):\n if(pad_type in ['refl', 'reflect']):\n PadLayer = nn.ReflectionPad2d\n elif(pad_type in ['repl', 'replicate']):\n PadLayer = nn.ReplicationPad2d\n elif(pad_type == 'zero'):\n PadLayer = nn.ZeroPad2d\n else:\n print('Pad type [%s] not recognized' % pad_type)\n return PadLayer\n\n\nclass GaussianNoise(nn.Module):\n def __init__(self, std=0.1, decay_rate=0):\n super().__init__()\n self.std = std\n self.decay_rate = decay_rate\n\n def decay_step(self):\n self.std = max(self.std - self.decay_rate, 0)\n\n def forward(self, x):\n if self.training and self.std != 0.:\n return x + torch.empty_like(x).normal_(std=self.std)\n else:\n return x\n\n\nclass GANLoss(nn.Module):\n def __init__(self, use_lsgan=True):\n super().__init__()\n if use_lsgan:\n self.loss = nn.MSELoss()\n else:\n self.loss = nn.BCEWithLogitsLoss()\n\n def __call__(self, inp, target_is_real):\n target_tensor = torch.empty_like(inp).fill_(target_is_real)\n return self.loss(inp, target_tensor)\n\n\ndef gram_matrix(x):\n n, c, h, w = x.size()\n x = x.view(n, c, -1)\n return (x @ x.transpose(1, 2)) / (c * h * w)\n\n\nclass VGGLoss(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n layer_ids = [22, 32, 42]\n self.weights = [5, 15, 2]\n m = vgg16_bn(pretrained=True).features.eval()\n return_nodes = {f'{x}': f'feat{i}' for i, x in enumerate(layer_ids)}\n self.vgg_fx = create_feature_extractor(m, return_nodes=return_nodes)\n self.vgg_fx.requires_grad_(False)\n self.l1_loss = nn.L1Loss()\n\n def forward(self, x, y):\n x_vgg = self.vgg_fx(x)\n with torch.inference_mode():\n y_vgg = self.vgg_fx(y)\n loss = self.l1_loss(x, y)\n for i, k in enumerate(x_vgg.keys()):\n loss += self.weights[i] * self.l1_loss(x_vgg[k], y_vgg[k].detach_()) # feature loss\n loss += self.weights[i]**2 * 5e3 * self.l1_loss(gram_matrix(x_vgg[k]), gram_matrix(y_vgg[k])) # style loss\n return loss\n\n\nclass Normalize(nn.Module):\n def __init__(self, power=2):\n super(Normalize, self).__init__()\n self.power = power\n\n def forward(self, x):\n norm = x.pow(self.power).sum(1, keepdim=True).pow(1. 
/ self.power)\n out = x.div(norm + 1e-7)\n return out\n\n\ndef get_norm_layer(norm_type='instance'):\n \"\"\"Return a normalization layer\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n \"\"\"\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n def norm_layer(x):\n return nn.Identity()\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef init_weights(net, init_type='normal', init_gain=0.02, debug=False):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if debug:\n print(classname)\n if init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n nn.init.normal_(m.weight.data, 1.0, init_gain)\n nn.init.constant_(m.bias.data, 0.0)\n\n net.apply(init_func)"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.randperm",
"torch.bmm",
"torch.eye",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"torch.sum",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.Tensor",
"numpy.array",
"torch.nn.Identity",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.PixelShuffle",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.inference_mode",
"numpy.ceil",
"torch.nn.MSELoss",
"torch.nn.L1Loss",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.utils.parametrizations.spectral_norm",
"torch.empty_like"
]
]
|
zimfv/rat | [
"d1851d8ae5f69c0e12c852ebb191529813c48f13"
]
| [
"rat/ratmath.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport cvxpy as cp\nfrom scipy.sparse import dok_matrix\n\ndef get_lines(table, name_cols=[], val_cols=None, sort=True):\n \"\"\"\n Returns lines of table. Line is vectored raw.\n \n Parameters:\n -----------\n table : DataFrame\n Table\n \n name_cols : array_like\n List of name-containing columns names \n \n val_cols : array_like\n List of value-containing columns names\n \n sort : bool\n Should the table be sorted by name_cols?\n \n Returns\n -------\n line : ndarray\n Matrix of size (len(table), len(val_cols))\n \n \"\"\"\n if val_cols is None:\n val_cols = np.setdiff1d(table.columns, name_cols)\n if sort:\n table = table.sort_values(by=name_cols)\n line = table[val_cols].values\n return line\n\n\ndef get_system(line_a, line_b):\n \"\"\"\n Returns matrix and vector of system with n+m condition-equations with rank n+m-1 if line_a.sum() == line_b.sum()\n \n Parameters:\n -----------\n line_a : ndarray\n vector\n \n line_b : ndarray\n vector\n \n Returns:\n --------\n matrix : dok_matrix\n Sprase matrix of size (n+m, n*m)\n \n r : ndarray:\n Vector of size len(line_a) + len(line_b)\n \n \"\"\"\n n = len(line_a)\n m = len(line_b)\n line_a, line_b = line_a.astype(float), line_b.astype(float)\n if (line_a.astype(int) == line_a).all() and (line_b.astype(int) == line_b).all():\n line_a, line_b = line_a.astype(int), line_b.astype(int)\n \n r = np.concatenate([line_a, line_b])\n '''\n matrix_a = np.concatenate(\n [\n np.concatenate([np.zeros(i*m), np.ones(m), np.zeros(m*(n-i-1))]).reshape([1, n*m]) for i in range(n)\n ]\n )\n matrix_b = np.concatenate([np.eye(m) for i in range(n)], axis=1)\n matrix = np.concatenate([matrix_a, matrix_b]).astype(int)\n '''\n matrix = dok_matrix((n+m, n*m), dtype=int)\n for i in range(n):\n for j in range(m):\n matrix[i, m*i+j] = 1\n matrix[n+j, m*i+j] = 1\n return matrix, r\n\n\ndef get_line_maximums(line_a, line_b):\n '''\n Returns vector of maximal possible values for nonnegative condition\n \n Parameters:\n -----------\n line_a : ndarray\n vector\n \n line_b : ndarray\n vector\n \n \n Returns:\n --------\n r : ndarray:\n Vector of maximal possible values\n '''\n r = np.array([np.concatenate([line_a for i in line_b]), np.concatenate([line_b for i in line_a])])\n r = np.min(r, axis=0)\n return r\n\n\ndef get_problem(line_a, line_b, integer=False, nonneg=True, obj_type='dependences'):\n \"\"\"\n Returns cvxpy-problem and variable.\n \n Parameters:\n -----------\n line_a : ndarray\n vector\n \n line_b : ndarray\n vector\n \n integer : bool\n Should the solve be integer?\n Use correct solver in restore_line. 
Some of them must be installed separately.\n https://www.cvxpy.org/tutorial/advanced/index.html\n \n nonneg : bool\n Should the solve be nonnegative?\n \n obj_type : str or function\n Type of minimizing object.\n If that is function, then will be minimize value obj_type(x) by x,\n Else:\n 'squares' : minimize sum of squares : (x_ij)^2\n 'dependeces' : minimize sum of dependence values : (s*x_ij - a_i*b_j)^2\n \n Returns:\n --------\n prob : Problem\n cvxpy.problems.problem.Problem\n \n x : Variable\n cvxpy.expressions.variable.Variable\n \n \"\"\"\n A, r = map(lambda i: i[:-1], get_system(line_a, line_b))\n if nonneg and integer:\n x = cp.Variable(len(line_a)*len(line_b), integer=integer)\n constraints = [A @ x == r, -x <= 0, x <= get_line_maximums(line_a, line_b)]\n else:\n x = cp.Variable(len(line_a)*len(line_b), integer=integer, nonneg=nonneg)\n constraints = [A @ x == r]\n \n if obj_type == 'squares':\n objective = cp.Minimize(cp.sum_squares(x))\n elif obj_type == 'dependences':\n s = line_a.sum()\n long_a = np.repeat(line_a, len(line_b))\n long_b = np.concatenate([line_b for i in line_a])\n objective = cp.Minimize(cp.sum_squares(s*x - long_a*long_b))\n else:\n objective = cp.Minimize(obj_type(x))\n \n prob = cp.Problem(objective, constraints)\n return prob, x\n\n\ndef restore_line(line_a, line_b, integer=False, nonneg=True, obj_type='dependences', solver='SCS', \n correct=True, print_status=False, throw_sums_error=True):\n \"\"\"\n Returns line vector restored from two lines (optimized by minimizing squares sum)\n \n Parameters:\n -----------\n line_a : ndarray\n Vector\n \n line_b : ndarray\n Vector\n \n integer : bool\n Should the solve be integer?\n Use correct solver. Some of them must be installed separately.\n https://www.cvxpy.org/tutorial/advanced/index.html\n \n nonneg : bool\n Should the solve be nonnegative?\n \n obj_type : str or function\n Type of minimizing object.\n If that is function, then will be minimize value obj_type(x) by x,\n Else:\n 'squares' : minimize sum of squares : (x_ij)^2\n 'dependeces' : minimize sum of dependence values : (s*x_ij - a_i*b_j)^2\n \n solver : string\n Solver keyword argument.\n \n correct: bool\n Should mistake be corrected?\n \n print_status : bool\n Should status be printed, when the problem be solved?\n \n throw_sums_error: bool\n Should the program be broken, where line_a and line_b has no equivalunt sums?\n \n \n Returns:\n --------\n line_res : ndarray\n Vector of size len(line_a)*len(line_b)\n \n \"\"\"\n if (line_a.sum() != line_b.sum()) and throw_sums_error:\n print((line_a.sum() != line_b.sum()), throw_sums_error, (line_a.sum() != line_b.sum()) and throw_sums_error)\n raise ValueError('Different sums.')\n prob, x = get_problem(line_a, line_b, integer=integer, nonneg=nonneg, obj_type=obj_type)\n prob.solve(solver=solver)\n if print_status:\n print(\"Status: \", prob.status)\n print(\"The optimal value is\", prob.value)\n print(\"A solution x is\")\n print(x.value, '\\n')\n line_res = x.value.copy()\n if correct and (line_res.sum() != line_a.sum()):\n line_res[np.argmax(abs(line_res))] += line_a.sum() - line_res.sum()\n return line_res"
]
| [
[
"numpy.concatenate",
"numpy.setdiff1d",
"scipy.sparse.dok_matrix",
"numpy.min"
]
]
|
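The record above restores an n×m table from its row and column marginals by solving a cvxpy problem over a flattened vector. A minimal standalone sketch of the same idea, written directly against a matrix variable rather than the module's get_system/get_problem helpers; the marginals a and b below are made-up numbers:

```python
import numpy as np
import cvxpy as cp

# Hypothetical marginals: row totals and column totals must agree.
a = np.array([4.0, 6.0])        # row sums
b = np.array([3.0, 3.0, 4.0])   # column sums
assert a.sum() == b.sum()

# Nonnegative matrix variable whose marginals must match a and b.
X = cp.Variable((len(a), len(b)), nonneg=True)
constraints = [cp.sum(X, axis=1) == a,   # row sums match a
               cp.sum(X, axis=0) == b]   # column sums match b

# Least-squares objective, analogous to obj_type='squares' in the module above.
prob = cp.Problem(cp.Minimize(cp.sum_squares(X)), constraints)
prob.solve()

print(prob.status)
print(np.round(X.value, 3))
```

Passing integer=True, as the module's get_problem allows, would turn this into a mixed-integer program and require a MIP-capable solver, which is why restore_line exposes the solver keyword.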
emenriquez/Springboard-Coursework | [
"7ac89a5b8bf7855bcd5cefaa02367134cb81ce8a"
]
| [
"Capstone Project 2/Flask App/engines1.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 16:37:55 2018\n\n@author: Erik\n\"\"\"\n\n# import packages to read and work with data\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\n\n# Packages for working with text data\nfrom nltk import tokenize\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Tools for Recommendations\nfrom sklearn.neighbors import NearestNeighbors\n\n# Load the cleaned dataset from the data folder\ndata = pd.read_pickle('data_clean.pkl')\n\n# drop the words column of data\ndata.drop('words', axis=1, inplace=True)\n\n# Convert the recipe ingredient lists into strings\ningredient_strings = [', '.join(recipe) for recipe in data.ingredients]\ndata.ingredients = ingredient_strings\n\n# Custom tokenizer to separate list into tokens by commas\ntokenized = tokenize.regexp.RegexpTokenizer(pattern=\", \", gaps=True)\n\n# Create TF-IDF weighting dictionary for each cuisine, exclude terms that appear in every cuisine\ntfidf = TfidfVectorizer(tokenizer=tokenized.tokenize, max_df=0.12, binary=True, use_idf=False, norm=None)\n\n# Fit and transform cuisine ingredient lists to generate sparse matrix\ningredients_weighted = tfidf.fit_transform(data.ingredients)\n\ndef preprocess_user_input(user_input):\n return None\n\ndef similar_cuisine_recommendations(user_recipe_id, n_recommendations=3):\n \"\"\"\n This function takes in the id of a recipe for a user, and generates similar recipe recommendations.\n \"\"\"\n output_string = []\n try:\n user_recipe_id = int(user_recipe_id)\n except:\n output_string.append('Oh no! This recipe is not in our database!')\n output_string.append('Here are a few recipe IDs you can try:')\n output_string.append('4758, 23260, 37648, 11935, 4407')\n return output_string\n # Define a map between ingredients_weighted and data ID's\n assert ingredients_weighted.shape[0] == data.shape[0]\n \n ingredients_weighted_indices_dict = defaultdict(int)\n for i in range(data.shape[0]):\n ingredients_weighted_indices_dict[data.index[i]] = i\n\n if user_recipe_id not in data.index:\n output_string.append('Oh no! 
This recipe is not in our database!')\n output_string.append('Here are a few recipe IDs you can try:')\n output_string.append('4758, 23260, 37648, 11935, 4407')\n else: \n # Find nearest neigbors among all recipes\n nbrs = NearestNeighbors(n_recommendations+1).fit(ingredients_weighted)\n indices = nbrs.kneighbors(ingredients_weighted[ingredients_weighted_indices_dict[user_recipe_id]], return_distance=False)\n output_string.append('Your recipe:')\n output_string.append('RECIPE ID: {0}'.format(user_recipe_id))\n output_string.append('CUISINE: {0}'.format(data.loc[user_recipe_id].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.loc[user_recipe_id].ingredients))\n output_string.append('********************')\n \n output_string.append('\\n\\nSimilar recipes you might be interested in:')\n for recipe in indices[0][1:]:\n output_string.append('RECIPE ID: {0}'.format(list(data.index)[recipe]))\n output_string.append('CUISINE: {0}'.format(data.iloc[recipe].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.iloc[recipe].ingredients))\n output_string.append('========================')\n \n return output_string\n\n# Find nearest neighbors from cuisines distinct from input\ndef unique_cuisine_recommendations(user_recipe_id, n_recommendations=3):\n \"\"\"\n This function takes in the id of a recipe for a user, and generates similar recipe recommendations from other cuisines.\n \"\"\"\n output_string = []\n try:\n user_recipe_id = int(user_recipe_id)\n except:\n output_string.append('Oh no! This recipe is not in our database!')\n output_string.append('Here are a few recipe IDs you can try:')\n output_string.append('4758, 23260, 37648, 11935, 4407')\n return output_string\n \n # Define a map between ingredients_weighted and data ID's\n assert ingredients_weighted.shape[0] == data.shape[0]\n \n ingredients_weighted_indices_dict = defaultdict(int)\n for i in range(data.shape[0]):\n ingredients_weighted_indices_dict[data.index[i]] = i\n\n \n if user_recipe_id not in data.index:\n output_string.append('Oh no! 
This recipe is not in our database!')\n output_string.append('Here are a few recipe IDs you can try:')\n output_string.append('4758, 23260, 37648, 11935, 4407')\n else: \n # Display the record for the user's recipe\n output_string.append('Your recipe:')\n output_string.append('RECIPE ID: {0}'.format(user_recipe_id))\n output_string.append('CUISINE: {0}'.format(data.loc[user_recipe_id].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.loc[user_recipe_id].ingredients))\n output_string.append('*********************')\n \n # Create a list of all cuisines\n cuisines_observed = [data.cuisine[user_recipe_id]]\n recommendations = []\n \n for recommendation in range(n_recommendations):\n data_subset = [data.index[i] for i in range(data.shape[0]) if data.loc[data.index[i]].cuisine not in cuisines_observed]\n ingredients_indices = [ingredients_weighted_indices_dict[value] for value in data_subset]\n ingredients_subset = ingredients_weighted[ingredients_indices]\n \n # Find nearest neigbors among cuisine subsets\n nbrs = NearestNeighbors(n_neighbors=2).fit(ingredients_subset)\n index = nbrs.kneighbors(ingredients_weighted[ingredients_weighted_indices_dict[user_recipe_id]],\n return_distance=False)\n \n # Append the result to the list for output and add the cuisine to cuisines_observed\n recommendations.append(index[0][0])\n cuisines_observed.append(data.iloc[index[0][0]].cuisine)\n \n # Display the results!\n output_string.append('\\n\\nTry some of these new recipes you might enjoy!')\n for recommendation in recommendations:\n output_string.append('RECIPE ID: {0}'.format(list(data.index)[recommendation]))\n output_string.append('CUISINE: {0}'.format(data.iloc[recommendation].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.iloc[recommendation].ingredients))\n output_string.append('========================')\n \n return output_string\n\ndef ingredient_list_recommendations(user_ingredients_string, n_recommendations=3):\n \"\"\"\n This function takes in the list of ingredients from a user, and generates similar recipe recommendations.\n \"\"\"\n \n output_string = []\n # Transform the recipe ingredients vector to TF-IDF format\n user_tfidf = tfidf.transform([user_ingredients_string])\n user_ingredient_list = user_ingredients_string.split(', ')\n \n # Find nearest neigbors among all recipes\n nbrs = NearestNeighbors(n_recommendations).fit(ingredients_weighted)\n indices = nbrs.kneighbors(user_tfidf, return_distance=False)\n\n output_string.append('\\n\\nYou can try these recipes!')\n \n # loop through nearest neighbors = n_recommendations\n for neighbor in range(n_recommendations):\n \n # List ingredients (if needed) to make neighbor recipe\n neighbor_ingredients = (data.iloc[indices[0][neighbor]].ingredients).split(', ')\n missing_ingredients = [ingredient for ingredient in neighbor_ingredients if ingredient not in user_ingredient_list]\n \n # Print a border between recipes\n output_string.append('\\n------------------------------\\n')\n \n # Display recipes that user can make\n if len(missing_ingredients) == 0:\n output_string.append('\\nYou have all of the ingredients needed to make this recipe!')\n output_string.append('RECIPE ID: {0}'.format(list(data.index)[indices[0][neighbor]]))\n output_string.append('CUISINE: {0}'.format(data.iloc[indices[0][neighbor]].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.iloc[indices[0][neighbor]].ingredients))\n \n else:\n # Display recipes that can be made\n output_string.append('\\nIf you buy: 
{0}'.format(missing_ingredients))\n output_string.append('\\nYou can make:')\n output_string.append('RECIPE ID: {0}'.format(list(data.index)[indices[0][neighbor]]))\n output_string.append('CUISINE: {0}'.format(data.iloc[indices[0][neighbor]].cuisine))\n output_string.append('INGREDIENTS: {0}'.format(data.iloc[indices[0][neighbor]].ingredients))\n \n output_string.append('\\n------------------------------\\n')\n \n return(output_string)\n\n# Convert to dense format for simpler handling\ningredients_dense = ingredients_weighted.todense()\n\n# create a matrix of zeros to fill co-occurence values into\ningredient_pairs = np.zeros((ingredients_dense.shape[1], ingredients_dense.shape[1]))\n\n# loop through ingredients and calculate fraction of recipes in which they occur together\nfor i in range(ingredients_dense.shape[1]):\n ingredient_indices = np.where(ingredients_dense[:,i] == 1)[0]\n ingredient_pairs[i,:] = ingredients_dense[ingredient_indices, :].sum(axis=0)/len(ingredient_indices)\n ingredient_pairs[i,i] = 0\n\ndef complimentary_ingredients(user_ingredient_list_string, n_recommendations=3):\n \"\"\"\n This function takes in a list of ingredients from a user and suggests ingredients that are \n most likely to compliment the set.\n \"\"\"\n output_string = []\n # Convert string input to list\n user_ingredient_list = user_ingredient_list_string.split(', ')\n \n # Create a sublists of users ingredients that are in the ingredient pairings matrix\n ingredient_sublist = [ingredient for ingredient in user_ingredient_list if ingredient in tfidf.vocabulary_.keys()]\n output_string.append('Searching for compliments to the following recognized ingredients:')\n if len(ingredient_sublist) == 0:\n output_string.append('Nothing!')\n else:\n output_string.append(ingredient_sublist)\n output_string.append('**************************')\n \n if len(ingredient_sublist) == 0:\n recommended_ingredients = ['No ingredients recognized for matching! Please try again!',\n 'You may want to try some of the following: cumin, strawberry, beef, sake, celery']\n else:\n # Extract indices of the rows in the matrix for the given ingredients\n ingredient_indices = [sorted(tfidf.vocabulary_).index(ingredient) for ingredient in ingredient_sublist]\n \n # Sum up the co-occurences of all ingredients and return the ingredients with the highest combined co-occurence scores\n top_pairings = np.argsort(ingredient_pairs[ingredient_indices,:].sum(axis=0))[::-1]\n top_complimentary_ingredients = [ingredient for ingredient in top_pairings if ingredient not in ingredient_sublist]\n \n # Convert the indices back into ingredient names for display\n recommended_ingredients = [sorted(tfidf.vocabulary_)[i] for i in top_complimentary_ingredients[:n_recommendations]]\n output_string.append('These might pair well with your ingredients!')\n \n # Display the recommendations to the user\n return(output_string, recommended_ingredients)"
]
| [
[
"pandas.read_pickle",
"numpy.zeros",
"numpy.where",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.neighbors.NearestNeighbors"
]
]
|
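The engines1.py record above vectorizes comma-separated ingredient strings with TfidfVectorizer and queries them with NearestNeighbors. A small self-contained sketch of that pipeline on invented ingredient strings (the real script loads data_clean.pkl); note that recent scikit-learn releases require n_neighbors to be passed as a keyword, whereas the record passes it positionally:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors

# Toy ingredient strings standing in for data.ingredients (hypothetical values).
recipes = [
    "tomato, basil, olive oil, garlic",
    "soy sauce, ginger, garlic, rice",
    "tomato, mozzarella, basil",
    "ginger, rice vinegar, sesame oil",
]

# Comma-separated tokens, mirroring the custom tokenizer used in the module above.
tfidf = TfidfVectorizer(tokenizer=lambda s: s.split(", "),
                        binary=True, use_idf=False, norm=None)
weights = tfidf.fit_transform(recipes)

# Nearest neighbours of recipe 0: itself plus the two closest other recipes.
nbrs = NearestNeighbors(n_neighbors=3).fit(weights)
indices = nbrs.kneighbors(weights[0], return_distance=False)
print(indices)  # recipe 2 ranks close to recipe 0, since they share tomato and basil
```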
hfzhang31/I-BAU_Adversarial_Unlearning_of-Backdoors_via_implicit_Hypergradient | [
"19195589caf6649a1c874aa0d3b6c708f9c3772f"
]
| [
"Warping-based_Backdoor_Attack-release/defenses/neural_cleanse/neural_cleanse.py"
]
| [
"from detecting import *\nfrom config import get_argument\nimport numpy as np\nimport sys\nimport json\n\nsys.path.insert(0, \"../..\")\n\n\ndef create_dir(path_dir):\n list_subdir = path_dir.strip(\".\").split(\"/\")\n list_subdir.remove(\"\")\n base_dir = \"./\"\n for subdir in list_subdir:\n base_dir = os.path.join(base_dir, subdir)\n try:\n os.mkdir(base_dir)\n except:\n pass\n\n\ndef outlier_detection(l1_norm_list, idx_mapping, opt):\n print(\"-\" * 30)\n print(\"Determining whether model is backdoor\")\n consistency_constant = 1.4826\n median = torch.median(l1_norm_list)\n mad = consistency_constant * torch.median(torch.abs(l1_norm_list - median))\n min_mad = torch.abs(torch.min(l1_norm_list) - median) / mad\n\n print(\"Median: {}, MAD: {}\".format(median, mad))\n print(\"Anomaly index: {}\".format(min_mad))\n\n if min_mad < 2:\n print(\"Not a backdoor model\")\n else:\n print(\"This is a backdoor model\")\n\n if opt.to_file:\n # result_path = os.path.join(opt.result, opt.saving_prefix, opt.dataset)\n output_path = os.path.join(\n result_path, \"{}_{}_output.txt\".format(opt.attack_mode, opt.dataset, opt.attack_mode)\n )\n with open(output_path, \"a+\") as f:\n f.write(\n str(median.cpu().numpy()) + \", \" + str(mad.cpu().numpy()) + \", \" + str(min_mad.cpu().numpy()) + \"\\n\"\n )\n l1_norm_list_to_save = [str(value) for value in l1_norm_list.cpu().numpy()]\n f.write(\", \".join(l1_norm_list_to_save) + \"\\n\")\n\n flag_list = []\n for y_label in idx_mapping:\n if l1_norm_list[idx_mapping[y_label]] > median:\n continue\n if torch.abs(l1_norm_list[idx_mapping[y_label]] - median) / mad > 2:\n flag_list.append((y_label, l1_norm_list[idx_mapping[y_label]]))\n\n if len(flag_list) > 0:\n flag_list = sorted(flag_list, key=lambda x: x[1])\n\n print(\n \"Flagged label list: {}\".format(\",\".join([\"{}: {}\".format(y_label, l_norm) for y_label, l_norm in flag_list]))\n )\n\n\ndef main():\n\n opt = config.get_argument().parse_args()\n\n if opt.dataset == \"mnist\" or opt.dataset == \"cifar10\":\n opt.total_label = 10\n elif opt.dataset == \"gtsrb\":\n opt.total_label = 43\n elif opt.dataset == \"celeba\":\n opt.total_label = 8\n else:\n raise Exception(\"Invalid Dataset\")\n\n if opt.dataset == \"cifar10\":\n opt.input_height = 32\n opt.input_width = 32\n opt.input_channel = 3\n elif opt.dataset == \"gtsrb\":\n opt.input_height = 32\n opt.input_width = 32\n opt.input_channel = 3\n elif opt.dataset == \"mnist\":\n opt.input_height = 28\n opt.input_width = 28\n opt.input_channel = 1\n elif opt.dataset == \"celeba\":\n opt.input_height = 64\n opt.input_width = 64\n opt.input_channel = 3\n\n else:\n raise Exception(\"Invalid Dataset\")\n\n result_path = os.path.join(opt.result, opt.dataset, opt.attack_mode)\n create_dir(result_path)\n opt.output_path = os.path.join(result_path, \"{}_{}_output_clean.txt\".format(opt.attack_mode, opt.dataset))\n if opt.to_file:\n with open(opt.output_path, \"w+\") as f:\n f.write(\"Output for cleanse: - {}\".format(opt.attack_mode, opt.dataset) + \"\\n\")\n\n init_mask = np.ones((1, opt.input_height, opt.input_width)).astype(np.float32)\n init_pattern = np.ones((opt.input_channel, opt.input_height, opt.input_width)).astype(np.float32)\n\n for test in range(opt.n_times_test):\n print(\"Test {}:\".format(test))\n if opt.to_file:\n with open(opt.output_path, \"a+\") as f:\n f.write(\"-\" * 30 + \"\\n\")\n f.write(\"Test {}:\".format(str(test)) + \"\\n\")\n\n masks = []\n idx_mapping = {}\n\n for target_label in range(opt.total_label):\n print(\"----------------- 
Analyzing label: {} -----------------\".format(target_label))\n opt.target_label = target_label\n recorder, opt = train(opt, init_mask, init_pattern)\n\n mask = recorder.mask_best\n masks.append(mask)\n idx_mapping[target_label] = len(masks) - 1\n\n l1_norm_list = torch.stack([torch.norm(m, p=opt.use_norm) for m in masks])\n print(\"{} labels found\".format(len(l1_norm_list)))\n print(\"Norm values: {}\".format(l1_norm_list))\n outlier_detection(l1_norm_list, idx_mapping, opt)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.ones"
]
]
|
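The outlier_detection function in the record above scores reversed trigger masks with a median-absolute-deviation anomaly index. Running the same computation on a toy tensor of mask L1 norms (the values are invented) shows how a single unusually small mask drives the index past the threshold of 2:

```python
import torch

# Invented L1 norms of reversed trigger masks, one per class label.
l1_norm_list = torch.tensor([55.0, 62.0, 58.0, 12.0, 60.0, 57.0])

consistency_constant = 1.4826  # scales MAD to match the std. dev. of a normal distribution
median = torch.median(l1_norm_list)
mad = consistency_constant * torch.median(torch.abs(l1_norm_list - median))
anomaly_index = torch.abs(torch.min(l1_norm_list) - median) / mad

print("median={:.1f}, MAD={:.2f}, anomaly index={:.2f}".format(
    median.item(), mad.item(), anomaly_index.item()))
# An anomaly index above 2 flags the unusually small mask (label 3 here) as a likely backdoor.
```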
jimdowling/Names-To-Nationality-Predicter | [
"49715aa267d48d92358f11401632e0ab22bc5f97"
]
| [
"ML Model/src/serializer_test.py"
]
| [
"import unittest\nimport math\nimport numpy as np\nfrom serializer import Serializer\n\nclass SerializerTest(unittest.TestCase):\n def test_serialize_example_given_name_should_return_correct_val_1(self):\n self.check_serialized_name(\"Bob Smith\", [2, 15, 2, 0, 19, 13, 9, 20, 8])\n\n def test_serialize_example_given_name_with_random_spaces_should_return_correct_val_2(self):\n self.check_serialized_name(\"Bob Smith \", [2, 15, 2, 0, 19, 13, 9, 20, 8])\n\n def test_serialize_example_given_name_with_unique_chars_should_return_correct_val_3(self):\n self.check_serialized_name(\"Bob Smáith \", [2, 15, 2, 0, 19, 13, 27, 9, 20, 8])\n\n def test_serialize_example_given_name_with_random_chars_should_return_correct_val_4(self):\n self.check_serialized_name(\"$$B)ob Sm#áith *\", [2, 15, 2, 0, 19, 13, 27, 9, 20, 8])\n\n def test_serialize_example_given_name_with_pronoun_should_return_correct_val_4(self):\n self.check_serialized_name(\"Dr. Bob Smith\", [2, 15, 2, 0, 19, 13, 9, 20, 8])\n\n def test_serialize_example_given_single_letter_should_return_correct_val_4(self):\n self.check_serialized_name(\"Bob C Smith\", [2, 15, 2, 0, 19, 13, 9, 20, 8])\n\n def test_serialize_example_given_single_letter_as_first_name_should_return_none(self):\n self.check_serialized_name(\"C Bob Smith\", None)\n\n def test_serialize_example_given_single_letter_as_first_name_with_pronoun_should_return_none(self):\n self.check_serialized_name(\"Mr. C Bob Smith\", None)\n \n def test_serialize_example_given_single_letter_as_last_name_should_return_none(self):\n self.check_serialized_name(\"Bob Joe S\", None)\n\n def test_serialize_example_given_middle_name_should_return_encoding_without_middle_name(self):\n self.check_serialized_name(\"Bob Joe Smith\", [2, 15, 2, 0, 19, 13, 9, 20, 8])\n\n def test_serialize_label_given_second_label_should_return_correct_val(self):\n self.check_serialized_label([\"Germany\", \"France\"], \"France\", [0, 1])\n\n def test_serialize_label_given_nth_label_should_return_correct_val(self):\n self.check_serialized_label([\"a\", \"b\", \"c\", \"d\"], \"c\", [0, 0, 1, 0])\n\n def test_serialize_label_given_unknown_label_should_throw_exception(self):\n with self.assertRaises(Exception):\n self.check_serialized_label([\"a\", \"b\", \"c\", \"d\"], \"e\", [0, 0, 0, 0])\n\n def check_serialized_name(self, name, expected_indexes_with_ones):\n serializer = Serializer(['Germany', 'France'])\n serialized_example = serializer.serialize_example(name)\n\n if expected_indexes_with_ones is None:\n self.assertIsNone(serialized_example)\n else:\n self.assertEqual(len(expected_indexes_with_ones), len(serialized_example))\n\n for i in range(len(expected_indexes_with_ones)):\n serialized_char = serialized_example[i]\n\n self.assertEqual(sum(serialized_char), 1.0)\n self.assertEqual(serialized_char[expected_indexes_with_ones[i]], 1)\n\n def check_serialized_label(self, possible_labels, unserialized_label, expected_serialized_label):\n serializer = Serializer(possible_labels)\n serialized_label = serializer.serialize_label(unserialized_label)\n\n self.assertEqual(len(serialized_label), len(expected_serialized_label))\n self.assertTrue(np.all(serialized_label == expected_serialized_label))\n\nif __name__ == '__main__':\n unittest.main()"
]
| [
[
"numpy.all"
]
]
|
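The test file above exercises a Serializer class that is not included in this record. A rough sketch of the character-index convention its assertions imply (space maps to 0, 'a'..'z' map to 1..26, titles and single-letter middle names are dropped); this is an illustration only, not the project's implementation, and it omits the accented-character handling and one-hot encoding the real class performs:

```python
def name_to_indices(name):
    # Drop titles like "Dr." and single-letter parts, keep first/last names.
    cleaned = " ".join(part for part in name.split()
                       if len(part) > 1 and not part.endswith("."))
    # Space -> 0, ASCII letters -> 1..26 (the real serializer also maps accented chars).
    return [0 if c == " " else ord(c) - ord("a") + 1
            for c in cleaned.lower() if c == " " or "a" <= c <= "z"]

print(name_to_indices("Bob Smith"))      # [2, 15, 2, 0, 19, 13, 9, 20, 8]
print(name_to_indices("Dr. Bob Smith"))  # title dropped -> same indices
```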
milaan9/Clustering_Algorithms_from_Scratch | [
"ec119a2522dc1dc7b097ecb269912f8d44387dbb"
]
| [
"02_Python/K_Means.py"
]
| [
"#================================================================================================================\n#----------------------------------------------------------------------------------------------------------------\n#\t\t\t\t\t\t\t\t\tK MEANS CLUSTERING\n#----------------------------------------------------------------------------------------------------------------\n#================================================================================================================\n\n# K means clustering is applied to normalized ipl player data\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport pandas as pd \n\nstyle.use('ggplot')\n\nclass K_Means:\n\tdef __init__(self, k =3, tolerance = 0.0001, max_iterations = 500):\n\t\tself.k = k\n\t\tself.tolerance = tolerance\n\t\tself.max_iterations = max_iterations\n\n\tdef fit(self, data):\n\n\t\tself.centroids = {}\n\n\t\t#initialize the centroids, the first 'k' elements in the dataset will be our initial centroids\n\t\tfor i in range(self.k):\n\t\t\tself.centroids[i] = data[i]\n\n\t\t#begin iterations\n\t\tfor i in range(self.max_iterations):\n\t\t\tself.classes = {}\n\t\t\tfor i in range(self.k):\n\t\t\t\tself.classes[i] = []\n\n\t\t\t#find the distance between the point and cluster; choose the nearest centroid\n\t\t\tfor features in data:\n\t\t\t\tdistances = [np.linalg.norm(features - self.centroids[centroid]) for centroid in self.centroids]\n\t\t\t\tclassification = distances.index(min(distances))\n\t\t\t\tself.classes[classification].append(features)\n\n\t\t\tprevious = dict(self.centroids)\n\n\t\t\t#average the cluster datapoints to re-calculate the centroids\n\t\t\tfor classification in self.classes:\n\t\t\t\tself.centroids[classification] = np.average(self.classes[classification], axis = 0)\n\n\t\t\tisOptimal = True\n\n\t\t\tfor centroid in self.centroids:\n\n\t\t\t\toriginal_centroid = previous[centroid]\n\t\t\t\tcurr = self.centroids[centroid]\n\n\t\t\t\tif np.sum((curr - original_centroid)/original_centroid * 100.0) > self.tolerance:\n\t\t\t\t\tisOptimal = False\n\n\t\t\t#break out of the main loop if the results are optimal, ie. the centroids don't change their positions much(more than our tolerance)\n\t\t\tif isOptimal:\n\t\t\t\tbreak\n\n\tdef pred(self, data):\n\t\tdistances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]\n\t\tclassification = distances.index(min(distances))\n\t\treturn classification\n\ndef main():\n\t\n\tdf = pd.read_csv(r\".\\data\\ipl.csv\")\n\tdf = df[['one', 'two']]\n\tdataset = df.astype(float).values.tolist()\n\n\tX = df.values #returns a numpy array\n\t\n\tkm = K_Means(3)\n\tkm.fit(X)\n\n\t# Plotting starts here\n\tcolors = 10*[\"r\", \"g\", \"c\", \"b\", \"k\"]\n\n\tfor centroid in km.centroids:\n\t\tplt.scatter(km.centroids[centroid][0], km.centroids[centroid][1], s = 130, marker = \"x\")\n\n\tfor classification in km.classes:\n\t\tcolor = colors[classification]\n\t\tfor features in km.classes[classification]:\n\t\t\tplt.scatter(features[0], features[1], color = color,s = 30)\n\t\n\tplt.show()\n\nif __name__ == \"__main__\":\n\tmain()\n"
]
| [
[
"matplotlib.style.use",
"numpy.linalg.norm",
"numpy.sum",
"matplotlib.pyplot.scatter",
"numpy.average",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
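The K_Means class in the record above can be exercised without the ipl.csv file its main() reads. A short sketch on two synthetic Gaussian blobs, assuming the class definition above has been imported or pasted into the session:

```python
import numpy as np

# Hypothetical 2-D data replacing the 'one'/'two' columns of ipl.csv:
# two well-separated Gaussian blobs.
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=(1, 1), scale=0.5, size=(50, 2)),
               rng.normal(loc=(5, 5), scale=0.5, size=(50, 2))])

km = K_Means(k=2, tolerance=0.0001, max_iterations=500)
km.fit(X)

print("centroids:", {k: np.round(v, 2) for k, v in km.centroids.items()})
print("first point assigned to cluster", km.pred(X[0]))
```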
franciscocalderon2/incubator-mxnet | [
"3260862c1ea928e99af5517b8e8ce16e670205a9"
]
| [
"python/mxnet/symbol/symbol.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# coding: utf-8\n# pylint: disable=invalid-name, protected-access, too-many-arguments, too-many-lines\n# pylint: disable=import-error, no-name-in-module\n\"\"\"Symbolic configuration API of MXNet.\"\"\"\ntry:\n from __builtin__ import slice as py_slice\nexcept ImportError:\n from builtins import slice as py_slice\n\nfrom array import array\nimport ctypes\nimport warnings\nfrom numbers import Number\nimport numpy as _numpy # pylint: disable=relative-import\n\nfrom .. import attribute\nfrom ..base import _LIB, numeric_types, c_array, c_array_buf, c_str, c_str_array, c_handle_array\nfrom ..base import mx_uint, py_str, string_types, integer_types, mx_int, mx_int64\nfrom ..base import NDArrayHandle, SymbolHandle\nfrom ..base import check_call, MXNetError, NotImplementedForSymbol\nfrom ..context import Context, current_context\nfrom ..ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP\nfrom ..ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID, _int64_enabled, _SIGNED_INT32_UPPER_LIMIT\nfrom ..executor import Executor\nfrom . import _internal\nfrom . import op\nfrom ._internal import SymbolBase, _set_symbol_class\nfrom ..util import is_np_shape\nfrom ..profiler import _current_scope as _profiler_scope\n\n__all__ = [\"Symbol\", \"var\", \"Variable\", \"Group\", \"load\", \"load_json\",\n \"pow\", \"power\", \"maximum\", \"minimum\", \"hypot\", \"eye\", \"zeros\",\n \"ones\", \"full\", \"arange\", \"linspace\", \"histogram\", \"split_v2\"]\n\n\nclass Symbol(SymbolBase):\n \"\"\"Symbol is symbolic graph of the mxnet.\"\"\"\n # disable dictionary storage, also do not have parent type.\n # pylint: disable=no-member\n __slots__ = []\n\n # Make numpy functions return Symbol instead of numpy object array\n __array_priority__ = 1000.0\n\n def as_np_ndarray(self):\n \"\"\"Convert mx.sym.Symbol to mx.sym.np._Symbol.\"\"\"\n from .numpy import _Symbol\n hdl = SymbolHandle()\n check_call(_LIB.MXShallowCopySymbol(self.handle, ctypes.byref(hdl)))\n return _Symbol(hdl)\n\n def as_nd_ndarray(self):\n \"\"\"Returns self. 
For the convenience of conversion between legacy and np symbols.\"\"\"\n return self\n\n def __repr__(self):\n \"\"\"Gets a string representation of the symbol.\"\"\"\n name = self.name\n if name is None:\n name = ', '.join([i.name for i in self])\n return '<%s group [%s]>' % (self.__class__.__name__, name)\n else:\n return '<%s %s>' % (self.__class__.__name__, name)\n\n def __iter__(self):\n \"\"\"Returns a generator object of symbol.\n\n One can loop through the returned object list to get outputs.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> c = a+b\n >>> d = mx.sym.Variable('d')\n >>> e = d+c\n >>> out = e.get_children()\n >>> out\n <Symbol Grouped>\n >>> for i in out:\n ... print(i)\n ...\n <Symbol d>\n <Symbol _plus0>\n \"\"\"\n return (self[i] for i in range(len(self)))\n\n def __abs__(self):\n \"\"\"x.__abs__() <=> abs(x) <=> x.abs() <=> mx.symbol.abs(x, y)\"\"\"\n return self.abs()\n\n def __add__(self, other):\n \"\"\"x.__add__(y) <=> x+y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_add` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._Plus(self, other)\n if isinstance(other, Number):\n return _internal._PlusScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __bool__(self):\n raise NotImplementedForSymbol(self.__bool__, 'bool')\n\n __nonzero__ = __bool__\n\n def __iadd__(self, other):\n raise NotImplementedForSymbol(self.__iadd__, '+=', other, 1)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n \"\"\"x.__sub__(y) <=> x-y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_sub` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._Minus(self, other)\n if isinstance(other, Number):\n return _internal._MinusScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __isub__(self, other):\n raise NotImplementedForSymbol(self.__isub__, '-=', other)\n\n def __rsub__(self, other):\n \"\"\"x.__rsub__(y) <=> y-x\n\n Only `NDArray` is supported for now.\n\n Example\n -------\n >>> x = mx.nd.ones((2,3))*3\n >>> y = mx.nd.ones((2,3))\n >>> x.__rsub__(y).asnumpy()\n array([[-2., -2., -2.],\n [-2., -2., -2.]], dtype=float32)\n \"\"\"\n if isinstance(other, Symbol):\n return other.__sub__(self)\n if isinstance(other, Number):\n return _internal._RMinusScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __mul__(self, other):\n \"\"\"x.__mul__(y) <=> x*y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_mul` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._Mul(self, other)\n if isinstance(other, Number):\n return _internal._MulScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __imul__(self, other):\n raise NotImplementedForSymbol(self.__imul__, '*=', other)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __div__(self, other):\n \"\"\"x.__div__(y) <=> x/y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_div` instead. 
\"\"\"\n if isinstance(other, Symbol):\n return _internal._Div(self, other)\n if isinstance(other, Number):\n return _internal._DivScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __rdiv__(self, other):\n \"\"\"x.__rdiv__(y) <=> y/x\n\n Only `NDArray` is supported for now.\n\n Example\n -------\n >>> x = mx.nd.ones((2,3))*3\n >>> y = mx.nd.ones((2,3))\n >>> x.__rdiv__(y).asnumpy()\n array([[ 0.33333334, 0.33333334, 0.33333334],\n [ 0.33333334, 0.33333334, 0.33333334]], dtype=float32)\n \"\"\"\n if isinstance(other, Symbol):\n return other.__truediv__(self)\n if isinstance(other, Number):\n return _internal._RDivScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __mod__(self, other):\n \"\"\"x.__mod__(y) <=> x%y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_mod` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._Mod(self, other)\n if isinstance(other, Number):\n return _internal._ModScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __rmod__(self, other):\n \"\"\"x.__rmod__(y) <=> y%x\n\n Only `NDArray` is supported for now.\n\n Example\n -------\n >>> x = mx.nd.ones((2,3))*3\n >>> y = mx.nd.ones((2,3))\n >>> x.__rmod__(y).asnumpy()\n array([[ 1., 1., 1.,\n [ 1., 1., 1., dtype=float32)\n \"\"\"\n if isinstance(other, Symbol):\n return other.__mod__(self)\n if isinstance(other, Number):\n return _internal._RModScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __idiv__(self, other):\n raise NotImplementedForSymbol(self.__idiv__, '/=', other)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __rtruediv__(self, other):\n return self.__rdiv__(other)\n\n def __itruediv__(self, other):\n raise NotImplementedForSymbol(self.__itruediv__, '/=', other)\n\n def __pow__(self, other):\n \"\"\"x.__pow__(y) <=> x**y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_pow` instead. 
\"\"\"\n if isinstance(other, Symbol):\n return _internal._Power(self, other)\n if isinstance(other, Number):\n return _internal._PowerScalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __rpow__(self, other):\n \"\"\"x.__rpow__(y) <=> y ** x\"\"\"\n if isinstance(other, Symbol):\n return other.__pow__(self)\n elif isinstance(other, Number):\n return _internal._rpower_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __neg__(self):\n \"\"\"x.__neg__() <=> -x\n\n Numerical negative, element-wise.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> a\n <Symbol a>\n >>> -a\n <Symbol _mulscalar0>\n >>> a_neg = a.__neg__()\n >>> c = a_neg*b\n >>> ex = c.eval(ctx=mx.cpu(), a=mx.nd.ones([2,3]), b=mx.nd.ones([2,3]))\n >>> ex[0].asnumpy()\n array([[-1., -1., -1.],\n [-1., -1., -1.]], dtype=float32)\n \"\"\"\n return self.__mul__(-1.0)\n\n def __copy__(self):\n return self.__deepcopy__(None)\n\n def __deepcopy__(self, _):\n \"\"\"Returns a deep copy of the input object.\n\n This function returns a deep copy of the input object including the current state\n of all its parameters such as weights, biases, etc.\n\n Any changes made to the deep copy do not reflect in the original object.\n\n Example\n -------\n >>> import copy\n >>> data = mx.sym.Variable('data')\n >>> data_1 = copy.deepcopy(data)\n >>> data_1 = 2*data\n >>> data_1.tojson()\n >>> data_1 is data # Data got modified\n False\n \"\"\"\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCopy(self.handle,\n ctypes.byref(handle)))\n return Symbol(handle)\n\n def __eq__(self, other):\n \"\"\"x.__eq__(y) <=> x==y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_equal` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._equal(self, other)\n if isinstance(other, numeric_types):\n return _internal._equal_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __ne__(self, other):\n \"\"\"x.__ne__(y) <=> x!=y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_not_equal` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._not_equal(self, other)\n if isinstance(other, numeric_types):\n return _internal._not_equal_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __gt__(self, other):\n \"\"\"x.__gt__(y) <=> x>y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_greater` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._greater(self, other)\n if isinstance(other, numeric_types):\n return _internal._greater_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __ge__(self, other):\n \"\"\"x.__ge__(y) <=> x>=y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_greater_equal` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._greater_equal(self, other)\n if isinstance(other, numeric_types):\n return _internal._greater_equal_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __lt__(self, other):\n \"\"\"x.__lt__(y) <=> x<y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_lesser` instead. 
\"\"\"\n if isinstance(other, Symbol):\n return _internal._lesser(self, other)\n if isinstance(other, numeric_types):\n return _internal._lesser_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __le__(self, other):\n \"\"\"x.__le__(y) <=> x<=y\n\n Scalar input is supported.\n Broadcasting is not supported. Use `broadcast_lesser_equal` instead. \"\"\"\n if isinstance(other, Symbol):\n return _internal._lesser_equal(self, other)\n if isinstance(other, numeric_types):\n return _internal._lesser_equal_scalar(self, scalar=other)\n else:\n raise TypeError('type %s not supported' % str(type(other)))\n\n def __getstate__(self):\n handle = self.handle\n if handle is not None:\n return {'handle': self.tojson()}\n else:\n return {'handle': None}\n\n def __setstate__(self, state):\n # pylint: disable=assigning-non-slot\n handle = state['handle']\n if handle is not None:\n json_str = handle\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))\n self.handle = handle\n else:\n self.handle = None\n\n def __call__(self, *args, **kwargs):\n \"\"\"Composes symbol using inputs.\n\n x.__call__(y, z) <=> x(y,z)\n\n This function internally calls `_compose` to compose the symbol and\n returns the composed symbol.\n\n Example\n -------\n >>> data = mx.symbol.Variable('data')\n >>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)\n >>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)\n >>> composed = net2(fc3_data=net1, name='composed')\n >>> composed\n <Symbol composed>\n >>> called = net2.__call__(fc3_data=net1, name='composed')\n >>> called\n <Symbol composed>\n\n Parameters\n ----------\n args:\n Positional arguments.\n\n kwargs:\n Keyword arguments.\n\n Returns\n -------\n The resulting symbol.\n \"\"\"\n s = self.__copy__()\n s._compose(*args, **kwargs)\n return s\n\n def _compose(self, *args, **kwargs):\n \"\"\"Composes symbol using inputs.\n\n x._compose(y, z) <=> x(y,z)\n\n This function mutates the current symbol.\n\n Example\n -------\n >>> data = mx.symbol.Variable('data')\n >>> net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)\n >>> net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)\n >>> net2\n <Symbol fc3>\n >>> net2._compose(fc3_data=net1, name='composed')\n >>> net2\n <Symbol composed>\n\n Parameters\n ----------\n args:\n Positional arguments.\n\n kwargs:\n Keyword arguments.\n\n Returns\n -------\n The resulting symbol.\n \"\"\"\n name = kwargs.pop('name', None)\n\n if name:\n name = c_str(name)\n if len(args) != 0 and len(kwargs) != 0:\n raise TypeError('compose only accept input Symbols \\\n either as positional or keyword arguments, not both')\n\n for arg in args:\n if not isinstance(arg, Symbol):\n raise TypeError('Compose expect `Symbol` as arguments')\n for val in kwargs.values():\n if not isinstance(val, Symbol):\n raise TypeError('Compose expect `Symbol` as arguments')\n\n num_args = len(args) + len(kwargs)\n if len(kwargs) != 0:\n keys = c_str_array(kwargs.keys())\n args = c_handle_array(kwargs.values())\n else:\n keys = None\n args = c_handle_array(args)\n check_call(_LIB.MXSymbolCompose(\n self.handle, name, num_args, keys, args))\n\n def __getitem__(self, index):\n \"\"\"x.__getitem__(i) <=> x[i]\n\n Returns a sliced view of the input symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> a.__getitem__(0)\n <Symbol a>\n >>> a[0]\n <Symbol a>\n\n Parameters\n ----------\n index : int or str\n Indexing key\n\n 
\"\"\"\n output_count = len(self)\n if isinstance(index, py_slice):\n start = 0 if index.start is None else index.start\n stop = output_count if index.stop is None else index.stop\n step = 1 if index.step is None else index.step\n return Group([self[i] for i in range(start, stop, step)])\n\n if isinstance(index, string_types):\n # Returning this list of names is expensive. Some symbols may have hundreds of outputs\n output_names = self.list_outputs()\n idx = None\n for i, name in enumerate(output_names):\n if name == index:\n if idx is not None:\n raise ValueError('There are multiple outputs with name \\\"%s\\\"' % index)\n idx = i\n if idx is None:\n raise ValueError('Cannot find output that matches name \\\"%s\\\"' % index)\n index = idx\n\n if not isinstance(index, int):\n raise TypeError('Symbol only support integer index to fetch i-th output')\n if index >= output_count:\n # Important, python determines the end by this exception\n raise IndexError\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolGetOutput(\n self.handle, mx_uint(index), ctypes.byref(handle)))\n return Symbol(handle=handle)\n\n @property\n def name(self):\n \"\"\"Gets name string from the symbol, this function only works for non-grouped symbol.\n\n Returns\n -------\n value : str\n The name of this symbol, returns ``None`` for grouped symbol.\n \"\"\"\n ret = ctypes.c_char_p()\n success = ctypes.c_int()\n check_call(_LIB.MXSymbolGetName(\n self.handle, ctypes.byref(ret), ctypes.byref(success)))\n if success.value != 0:\n return py_str(ret.value)\n else:\n return None\n\n def attr(self, key):\n \"\"\"Returns the attribute string for corresponding input key from the symbol.\n\n This function only works for non-grouped symbols.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})\n >>> data.attr('mood')\n 'angry'\n\n Parameters\n ----------\n key : str\n The key corresponding to the desired attribute.\n\n Returns\n -------\n value : str\n The desired attribute value, returns ``None`` if the attribute does not exist.\n \"\"\"\n ret = ctypes.c_char_p()\n success = ctypes.c_int()\n check_call(_LIB.MXSymbolGetAttr(\n self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))\n if success.value != 0:\n return py_str(ret.value)\n else:\n return None\n\n def list_attr(self, recursive=False):\n \"\"\"Gets all attributes from the symbol.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})\n >>> data.list_attr()\n {'mood': 'angry'}\n\n Returns\n -------\n ret : Dict of str to str\n A dictionary mapping attribute keys to values.\n \"\"\"\n if recursive:\n raise DeprecationWarning(\"Symbol.list_attr with recursive=True has been deprecated. 
\"\n \"Please use attr_dict instead.\")\n size = mx_uint()\n pairs = ctypes.POINTER(ctypes.c_char_p)()\n f_handle = _LIB.MXSymbolListAttrShallow\n check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))\n return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}\n\n def attr_dict(self):\n \"\"\"Recursively gets all attributes from the symbol and its children.\n\n Example\n -------\n >>> a = mx.sym.Variable('a', attr={'a1':'a2'})\n >>> b = mx.sym.Variable('b', attr={'b1':'b2'})\n >>> c = a+b\n >>> c.attr_dict()\n {'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}\n\n Returns\n -------\n ret : Dict of str to dict\n There is a key in the returned dict for every child with non-empty attribute set.\n For each symbol, the name of the symbol is its key in the dict\n and the correspond value is that symbol's attribute list (itself a dictionary).\n \"\"\"\n size = mx_uint()\n pairs = ctypes.POINTER(ctypes.c_char_p)()\n f_handle = _LIB.MXSymbolListAttr\n check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))\n ret = {}\n for i in range(size.value):\n name, key = py_str(pairs[i * 2]).split('$')\n val = py_str(pairs[i * 2 + 1])\n if name not in ret:\n ret[name] = {}\n ret[name][key] = val\n return ret\n\n def _set_attr(self, **kwargs):\n \"\"\"Sets an attribute of the symbol.\n\n For example. A._set_attr(foo=\"bar\") adds the mapping ``\"{foo: bar}\"``\n to the symbol's attribute dictionary.\n\n Parameters\n ----------\n **kwargs\n The attributes to set\n \"\"\"\n for key, value in kwargs.items():\n if not isinstance(value, string_types):\n raise ValueError(\"Set Attr only accepts string values\")\n check_call(_LIB.MXSymbolSetAttr(\n self.handle, c_str(key), c_str(str(value))))\n\n def get_inputs(self):\n \"\"\"Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of inputs to this symbol.\n\n Consider the following code:\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> d = c.get_inputs()\n >>> d\n <Symbol Grouped>\n >>> d.list_outputs()\n ['a', 'b']\n\n Returns\n -------\n sgroup : Symbol\n A symbol group containing all input nodes of the computation graph\n used to compute the symbol.\n \"\"\"\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolGetInputs(\n self.handle, ctypes.byref(handle)))\n return Symbol(handle=handle)\n\n def get_internals(self):\n \"\"\"Gets a new grouped symbol `sgroup`. 
The output of `sgroup` is a list of\n outputs of all of the internal nodes.\n\n Consider the following code:\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> d = c.get_internals()\n >>> d\n <Symbol Grouped>\n >>> d.list_outputs()\n ['a', 'b', '_plus4_output']\n\n Returns\n -------\n sgroup : Symbol\n A symbol group containing all internal and leaf nodes of the computation graph\n used to compute the symbol.\n \"\"\"\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolGetInternals(\n self.handle, ctypes.byref(handle)))\n return Symbol(handle=handle)\n\n def get_children(self):\n \"\"\"Gets a new grouped symbol whose output contains\n inputs to output nodes of the original symbol.\n\n Example\n -------\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.Variable('z')\n >>> a = y+z\n >>> b = x+a\n >>> b.get_children()\n <Symbol Grouped>\n >>> b.get_children().list_outputs()\n ['x', '_plus10_output']\n >>> b.get_children().get_children().list_outputs()\n ['y', 'z']\n\n Returns\n -------\n sgroup : Symbol or None\n The children of the head node. If the symbol has no\n inputs then ``None`` will be returned.\n \"\"\"\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolGetChildren(\n self.handle, ctypes.byref(handle)))\n ret = Symbol(handle=handle)\n if len(ret.list_outputs()) == 0:\n return None\n return ret\n\n def list_arguments(self):\n \"\"\"Lists all the arguments in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_arguments\n ['a', 'b']\n\n Returns\n -------\n args : list of string\n List containing the names of all the arguments required to compute the symbol.\n \"\"\"\n size = ctypes.c_uint()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.MXSymbolListArguments(\n self.handle, ctypes.byref(size), ctypes.byref(sarr)))\n return [py_str(sarr[i]) for i in range(size.value)]\n\n def list_outputs(self):\n \"\"\"Lists all the outputs in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_outputs()\n ['_plus12_output']\n\n Returns\n -------\n list of str\n List of all the outputs.\n For most symbols, this list contains only the name of this symbol.\n For symbol groups, this is a list with the names of all symbols\n in the group.\n \"\"\"\n size = ctypes.c_uint()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.MXSymbolListOutputs(\n self.handle, ctypes.byref(size), ctypes.byref(sarr)))\n return [py_str(sarr[i]) for i in range(size.value)]\n\n # pylint: disable=invalid-length-returned\n def __len__(self):\n \"\"\"Get number of outputs for the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> len(c)\n\n Returns\n -------\n len(self): Number of outputs\n Number of outputs\n \"\"\"\n output_count = mx_uint()\n check_call(_LIB.MXSymbolGetNumOutputs(self.handle, ctypes.byref(output_count)))\n return output_count.value\n\n def list_auxiliary_states(self):\n \"\"\"Lists all the auxiliary states in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_auxiliary_states()\n []\n\n Example of auxiliary states in `BatchNorm`.\n\n >>> data = mx.symbol.Variable('data')\n >>> weight = mx.sym.Variable(name='fc1_weight')\n >>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)\n >>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')\n >>> 
fc2.list_auxiliary_states()\n ['batchnorm0_moving_mean', 'batchnorm0_moving_var']\n\n Returns\n -------\n aux_states : list of str\n List of the auxiliary states in input symbol.\n\n Notes\n -----\n Auxiliary states are special states of symbols that do not correspond to an argument,\n and are not updated by gradient descent. Common examples of auxiliary states\n include the `moving_mean` and `moving_variance` in `BatchNorm`.\n Most operators do not have auxiliary states.\n \"\"\"\n size = ctypes.c_uint()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.MXSymbolListAuxiliaryStates(\n self.handle, ctypes.byref(size), ctypes.byref(sarr)))\n return [py_str(sarr[i]) for i in range(size.value)]\n\n def list_inputs(self):\n \"\"\"Lists all arguments and auxiliary states of this Symbol.\n\n Returns\n -------\n inputs : list of str\n List of all inputs.\n\n Examples\n --------\n >>> bn = mx.sym.BatchNorm(name='bn')\n >>> bn.list_arguments()\n ['bn_data', 'bn_gamma', 'bn_beta']\n >>> bn.list_auxiliary_states()\n ['bn_moving_mean', 'bn_moving_var']\n >>> bn.list_inputs()\n ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']\n \"\"\"\n size = ctypes.c_uint()\n sarr = ctypes.POINTER(ctypes.c_char_p)()\n check_call(_LIB.NNSymbolListInputNames(\n self.handle, 0, ctypes.byref(size), ctypes.byref(sarr)))\n return [py_str(sarr[i]) for i in range(size.value)]\n\n def infer_type(self, *args, **kwargs):\n \"\"\"Infers the type of all arguments and all outputs, given the known types\n for some arguments.\n\n This function takes the known types of some arguments in either positional way\n or keyword argument way as input. It returns a tuple of `None` values\n if there is not enough information to deduce the missing types.\n\n Inconsistencies in the known types will cause an error to be raised.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> arg_types, out_types, aux_types = c.infer_type(a='float32')\n >>> arg_types\n [<type 'numpy.float32'>, <type 'numpy.float32'>]\n >>> out_types\n [<type 'numpy.float32'>]\n >>> aux_types\n []\n\n Parameters\n ----------\n *args :\n Type of known arguments in a positional way.\n Unknown type can be marked as None.\n\n **kwargs :\n Keyword arguments of known types.\n\n Returns\n -------\n arg_types : list of numpy.dtype or None\n List of argument types.\n The order is same as the order of list_arguments().\n out_types : list of numpy.dtype or None\n List of output types.\n The order is same as the order of list_outputs().\n aux_types : list of numpy.dtype or None\n List of auxiliary state types.\n The order is same as the order of list_auxiliary_states().\n \"\"\"\n try:\n res = self._infer_type_impl(False, *args, **kwargs)\n if res[1] is None:\n arg_shapes, _, _ = self._infer_type_impl(True, *args, **kwargs)\n arg_names = self.list_arguments()\n unknowns = []\n for name, dtype in zip(arg_names, arg_shapes):\n if not dtype:\n if len(unknowns) >= 10:\n unknowns.append('...')\n break\n unknowns.append('%s: %s' % (name, str(dtype)))\n warnings.warn(\n \"Cannot decide type for the following arguments. \" +\n \"Consider providing them as input:\\n\\t\" +\n \"\\n\\t\".join(unknowns), stacklevel=2)\n return res\n except MXNetError:\n print(\"infer_type error. 
Arguments:\")\n for i, arg in enumerate(args):\n print(\" #%d: %s\" % (i, arg))\n for k, v in kwargs.items():\n print(\" %s: %s\" % (k, v))\n raise\n\n def infer_type_partial(self, *args, **kwargs):\n \"\"\"Infers the type partially.\n\n This functions works the same way as `infer_type`,\n except that this function can return partial results.\n\n In the following example, information about fc2 is not available. So, `infer_shape`\n will return a tuple of `None` values but `infer_shape_partial` will return partial values.\n\n Example\n -------\n >>> data = mx.sym.Variable('data')\n >>> prev = mx.sym.Variable('prev')\n >>> casted_prev = mx.sym.cast(prev, dtype='float32')\n >>> out = mx.sym.Activation(data=mx.sym.elemwise_add(data, casted_prev), act_type='relu')\n >>> out.list_arguments()\n ['data', 'prev']\n >>> out.infer_type(data='float32')\n (None, None, None)\n >>> out.infer_type_partial(data='float32')\n ([numpy.float32, None], [numpy.float32], [])\n >>> # infers type if you give information about prev\n >>> out.infer_type(data='float32', prev='float16')\n ([numpy.float32, numpy.float16], [numpy.float32], [])\n\n Parameters\n ----------\n *args :\n Type of known arguments in a positional way.\n Unknown type can be marked as None.\n\n **kwargs :\n Keyword arguments of known types.\n\n Returns\n -------\n arg_types : list of numpy.dtype or None\n List of argument types.\n The order is same as the order of list_arguments().\n out_types : list of numpy.dtype or None\n List of output types.\n The order is same as the order of list_outputs().\n aux_types : list of numpy.dtype or None\n List of auxiliary state types.\n The order is same as the order of list_auxiliary_states().\n \"\"\"\n return self._infer_type_impl(True, *args, **kwargs)\n\n def _infer_type_impl(self, partial, *args, **kwargs):\n \"\"\"The actual implementation for calling type inference API.\"\"\"\n # pylint: disable=too-many-locals\n if len(args) != 0 and len(kwargs) != 0:\n raise ValueError('Can only specify known argument \\\n types either by positional or kwargs way.')\n sdata = []\n if len(args) != 0:\n keys = c_array(ctypes.c_char_p, [])\n for s in args:\n if s is not None:\n s = _numpy.dtype(s).type\n if s not in _DTYPE_NP_TO_MX:\n raise TypeError('Argument need to be one of ' + str(_DTYPE_NP_TO_MX))\n sdata.append(_DTYPE_NP_TO_MX[s])\n else:\n sdata.append(-1)\n else:\n str_keys = []\n for k, v in kwargs.items():\n v = _numpy.dtype(v).type\n if v in _DTYPE_NP_TO_MX:\n str_keys.append(k)\n sdata.append(_DTYPE_NP_TO_MX[v])\n keys = c_str_array(str_keys)\n arg_type_size = mx_uint()\n arg_type_data = ctypes.POINTER(ctypes.c_int)()\n out_type_size = mx_uint()\n out_type_data = ctypes.POINTER(ctypes.c_int)()\n aux_type_size = mx_uint()\n aux_type_data = ctypes.POINTER(ctypes.c_int)()\n complete = ctypes.c_int()\n if partial:\n infer_func = _LIB.MXSymbolInferTypePartial\n else:\n infer_func = _LIB.MXSymbolInferType\n check_call(infer_func(\n self.handle,\n mx_uint(len(sdata)),\n keys,\n c_array_buf(ctypes.c_int, array('i', sdata)),\n ctypes.byref(arg_type_size),\n ctypes.byref(arg_type_data),\n ctypes.byref(out_type_size),\n ctypes.byref(out_type_data),\n ctypes.byref(aux_type_size),\n ctypes.byref(aux_type_data),\n ctypes.byref(complete)))\n if complete.value != 0:\n arg_types = [\n _DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)]\n out_types = [\n _DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)]\n aux_types = [\n _DTYPE_MX_TO_NP[aux_type_data[i]] for i in 
range(aux_type_size.value)]\n return (arg_types, out_types, aux_types)\n else:\n return (None, None, None)\n\n def infer_shape(self, *args, **kwargs):\n \"\"\"Infers the shapes of all arguments and all outputs given the known shapes of\n some arguments.\n\n This function takes the known shapes of some arguments in either positional way\n or keyword argument way as input. It returns a tuple of `None` values\n if there is not enough information to deduce the missing shapes.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))\n >>> arg_shapes\n [(3L, 3L), (3L, 3L)]\n >>> out_shapes\n [(3L, 3L)]\n >>> aux_shapes\n []\n >>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.\n (None, None, None)\n\n Inconsistencies in the known shapes will cause an error to be raised.\n See the following example:\n\n >>> data = mx.sym.Variable('data')\n >>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)\n >>> out = mx.sym.Activation(data=out, act_type='relu')\n >>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)\n >>> weight_shape= (1, 100)\n >>> data_shape = (100, 100)\n >>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)\n Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)\n\n Parameters\n ----------\n *args :\n Shape of arguments in a positional way.\n Unknown shape can be marked as None.\n\n **kwargs :\n Keyword arguments of the known shapes.\n\n Returns\n -------\n arg_shapes : list of tuple or None\n List of argument shapes.\n The order is same as the order of list_arguments().\n out_shapes : list of tuple or None\n List of output shapes.\n The order is same as the order of list_outputs().\n aux_shapes : list of tuple or None\n List of auxiliary state shapes.\n The order is same as the order of list_auxiliary_states().\n \"\"\"\n # pylint: disable=too-many-locals\n try:\n res = self._infer_shape_impl(False, *args, **kwargs)\n if res[1] is None:\n arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)\n arg_names = self.list_arguments()\n unknowns = []\n for name, shape in zip(arg_names, arg_shapes):\n if is_np_shape():\n shape_is_none = not shape or -1 in shape\n else:\n shape_is_none = not shape or 0 in shape\n if shape_is_none:\n if len(unknowns) >= 10:\n unknowns.append('...')\n break\n unknowns.append('%s: %s' % (name, str(shape)))\n warnings.warn(\n \"Cannot decide shape for the following arguments \" +\n \"(0s in shape means unknown dimensions). \" +\n \"Consider providing them as input:\\n\\t\" +\n \"\\n\\t\".join(unknowns), stacklevel=2)\n return res\n except MXNetError:\n print(\"infer_shape error. Arguments:\")\n for i, arg in enumerate(args):\n print(\" #%d: %s\" % (i, arg))\n for k, v in kwargs.items():\n print(\" %s: %s\" % (k, v))\n raise\n\n def infer_shape_partial(self, *args, **kwargs):\n \"\"\"Infers the shape partially.\n\n This functions works the same way as `infer_shape`,\n except that this function can return partial results.\n\n In the following example, information about fc2 is not available. 
So, `infer_shape`\n will return a tuple of `None` values but `infer_shape_partial` will return partial values.\n\n Example\n -------\n >>> data = mx.sym.Variable('data')\n >>> prev = mx.sym.Variable('prev')\n >>> fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)\n >>> fc2 = mx.sym.FullyConnected(data=prev, name='fc2', num_hidden=128)\n >>> out = mx.sym.Activation(data=mx.sym.elemwise_add(fc1, fc2), act_type='relu')\n >>> out.list_arguments()\n ['data', 'fc1_weight', 'fc1_bias', 'prev', 'fc2_weight', 'fc2_bias']\n >>> out.infer_shape(data=(10,64))\n (None, None, None)\n >>> out.infer_shape_partial(data=(10,64))\n ([(10L, 64L), (128L, 64L), (128L,), (), (), ()], [(10L, 128L)], [])\n >>> # infers shape if you give information about fc2\n >>> out.infer_shape(data=(10,64), prev=(10,128))\n ([(10L, 64L), (128L, 64L), (128L,), (10L, 128L), (128L, 128L), (128L,)], [(10L, 128L)], [])\n\n Parameters\n ----------\n *args :\n Shape of arguments in a positional way.\n Unknown shape can be marked as None\n\n **kwargs :\n Keyword arguments of known shapes.\n\n Returns\n -------\n arg_shapes : list of tuple or None\n List of argument shapes.\n The order is same as the order of list_arguments().\n out_shapes : list of tuple or None\n List of output shapes.\n The order is same as the order of list_outputs().\n aux_shapes : list of tuple or None\n List of auxiliary state shapes.\n The order is same as the order of list_auxiliary_states().\n \"\"\"\n return self._infer_shape_impl(True, *args, **kwargs)\n\n def _infer_shape_impl(self, partial, *args, **kwargs):\n \"\"\"The actual implementation for calling shape inference API.\"\"\"\n # pylint: disable=too-many-locals\n if len(args) != 0 and len(kwargs) != 0:\n raise ValueError('Can only specify known argument \\\n shapes either by positional or kwargs way.')\n sdata = []\n indptr = [0]\n if len(args) != 0:\n keys = c_array(ctypes.c_char_p, [])\n for i, s in enumerate(args):\n if s is not None:\n if not isinstance(s, tuple):\n raise TypeError(\"Arguments need to be shapes (tuple), \"\n \"but argument %d is %s.\" % (i, type(s)))\n sdata.extend(s)\n indptr.append(len(sdata))\n else:\n str_keys = []\n for k, v in kwargs.items():\n if not isinstance(v, tuple):\n raise TypeError(\"Arguments need to be shapes (tuple), \"\n \"but '%s' is %s.\" % (k, type(v)))\n str_keys.append(k)\n sdata.extend(v)\n indptr.append(len(sdata))\n keys = c_str_array(str_keys)\n arg_shape_size = mx_uint()\n arg_shape_ndim = ctypes.POINTER(mx_int)()\n out_shape_size = mx_uint()\n out_shape_ndim = ctypes.POINTER(mx_int)()\n aux_shape_size = mx_uint()\n aux_shape_ndim = ctypes.POINTER(mx_int)()\n complete = ctypes.c_int()\n if _int64_enabled():\n arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int64))()\n out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int64))()\n aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int64))()\n if partial:\n infer_func = _LIB.MXSymbolInferShapePartialEx64\n else:\n infer_func = _LIB.MXSymbolInferShapeEx64\n check_call(infer_func(\n self.handle,\n mx_uint(len(indptr) - 1),\n keys,\n c_array_buf(mx_int64, array('q', indptr)),\n c_array_buf(mx_int64, array('q', sdata)),\n ctypes.byref(arg_shape_size),\n ctypes.byref(arg_shape_ndim),\n ctypes.byref(arg_shape_data),\n ctypes.byref(out_shape_size),\n ctypes.byref(out_shape_ndim),\n ctypes.byref(out_shape_data),\n ctypes.byref(aux_shape_size),\n ctypes.byref(aux_shape_ndim),\n ctypes.byref(aux_shape_data),\n ctypes.byref(complete)))\n else:\n for size in sdata:\n if size > 
_SIGNED_INT32_UPPER_LIMIT:\n raise Exception(\"[_infer_shape_impl] Size of tensor you are trying to \" +\n \"allocate is larger than 2^31 elements. Please build \" +\n \"with flag USE_INT64_TENSOR_SIZE=1\")\n arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()\n out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()\n aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()\n if partial:\n infer_func = _LIB.MXSymbolInferShapePartialEx\n else:\n infer_func = _LIB.MXSymbolInferShapeEx\n check_call(infer_func(\n self.handle,\n mx_uint(len(indptr) - 1),\n keys,\n c_array_buf(mx_uint, array('I', indptr)),\n c_array_buf(mx_int, array('i', sdata)),\n ctypes.byref(arg_shape_size),\n ctypes.byref(arg_shape_ndim),\n ctypes.byref(arg_shape_data),\n ctypes.byref(out_shape_size),\n ctypes.byref(out_shape_ndim),\n ctypes.byref(out_shape_data),\n ctypes.byref(aux_shape_size),\n ctypes.byref(aux_shape_ndim),\n ctypes.byref(aux_shape_data),\n ctypes.byref(complete)))\n if complete.value != 0:\n arg_shapes = [tuple(arg_shape_data[i][:arg_shape_ndim[i]])\n if arg_shape_ndim[i] >= 0 else None\n for i in range(arg_shape_size.value)]\n out_shapes = [tuple(out_shape_data[i][:out_shape_ndim[i]])\n if out_shape_ndim[i] >= 0 else None\n for i in range(out_shape_size.value)]\n aux_shapes = [tuple(aux_shape_data[i][:aux_shape_ndim[i]])\n if aux_shape_ndim[i] >= 0 else None\n for i in range(aux_shape_size.value)]\n return (arg_shapes, out_shapes, aux_shapes)\n else:\n return (None, None, None)\n # pylint: enable=too-many-locals\n\n def debug_str(self):\n \"\"\"Gets a debug string of symbol.\n\n It contains Symbol output, variables and operators in the computation graph\n with their inputs, variables and attributes.\n\n Returns\n -------\n string\n Debug string of the symbol.\n\n Examples\n --------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.sin(a)\n >>> c = 2 * a + b\n >>> d = mx.sym.FullyConnected(data=c, num_hidden=10)\n >>> d.debug_str()\n >>> print d.debug_str()\n Symbol Outputs:\n\t output[0]=fullyconnected0(0)\n Variable:a\n --------------------\n Op:_mul_scalar, Name=_mulscalar0\n Inputs:\n \targ[0]=a(0) version=0\n Attrs:\n \tscalar=2\n --------------------\n Op:sin, Name=sin0\n Inputs:\n \targ[0]=a(0) version=0\n --------------------\n Op:elemwise_add, Name=_plus0\n Inputs:\n \targ[0]=_mulscalar0(0)\n \targ[1]=sin0(0)\n Variable:fullyconnected0_weight\n Variable:fullyconnected0_bias\n --------------------\n Op:FullyConnected, Name=fullyconnected0\n Inputs:\n \targ[0]=_plus0(0)\n \targ[1]=fullyconnected0_weight(0) version=0\n \targ[2]=fullyconnected0_bias(0) version=0\n Attrs:\n \tnum_hidden=10\n \"\"\"\n debug_str = ctypes.c_char_p()\n check_call(_LIB.MXSymbolPrint(\n self.handle, ctypes.byref(debug_str)))\n return py_str(debug_str.value)\n\n def save(self, fname, remove_amp_cast=True):\n \"\"\"Saves symbol to a file.\n\n You can also use pickle to do the job if you only work on python.\n The advantage of `load`/`save` functions is that the file contents are language agnostic.\n This means the model saved by one language binding can be loaded by a different\n language binding of `MXNet`.\n You also get the benefit of being able to directly load/save from cloud storage(S3, HDFS).\n\n Parameters\n ----------\n fname : str\n The name of the file.\n\n - \"s3://my-bucket/path/my-s3-symbol\"\n - \"hdfs://my-bucket/path/my-hdfs-symbol\"\n - \"/path-to/my-local-symbol\"\n remove_amp_cast : bool, optional\n Whether to remove the amp_cast and amp_multicast operators, before saving the model.\n\n See 
Also\n --------\n symbol.load : Used to load symbol from file.\n \"\"\"\n if not isinstance(fname, string_types):\n raise TypeError('fname need to be string')\n if remove_amp_cast:\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolRemoveAmpCast(self.handle, ctypes.byref(handle)))\n check_call(_LIB.MXSymbolSaveToFile(handle, c_str(fname)))\n else:\n check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))\n\n def tojson(self, remove_amp_cast=True):\n \"\"\"Saves symbol to a JSON string.\n\n See Also\n --------\n symbol.load_json : Used to load symbol from JSON string.\n \"\"\"\n json_str = ctypes.c_char_p()\n if remove_amp_cast:\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolRemoveAmpCast(self.handle, ctypes.byref(handle)))\n check_call(_LIB.MXSymbolSaveToJSON(handle, ctypes.byref(json_str)))\n else:\n check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str)))\n return py_str(json_str.value)\n\n @staticmethod\n def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):\n \"\"\"Helper function to get NDArray lists handles from various inputs.\n\n Parameters\n ----------\n arg_key : str\n The name of argument, used for error message.\n\n args : list of NDArray or dict of str to NDArray\n Input arguments to the symbols.\n If type is list of NDArray, the position is in the same order of arg_names.\n If type is dict of str to NDArray, then it maps the name of arguments\n to the corresponding NDArray,\n\n args_names : list of string\n List of argument names.\n\n allow_missing : boolean\n Whether missing argument is allowed.\n When allowed, the missing handle will be set to None(null)\n\n Returns\n -------\n handles : list of NDArrayHandle\n The positional list of NDArrayHandles generated from input.\n \"\"\"\n # setup args\n arg_handles = []\n arg_arrays = []\n if isinstance(args, list):\n if len(args) != len(arg_names):\n raise ValueError('Length of %s does not match the number of arguments' % arg_key)\n for narr in args:\n if narr is None and allow_missing:\n arg_handles.append(None)\n elif not isinstance(narr, NDArray):\n raise TypeError('Only accept list of NDArrays or dict of str to NDArray')\n else:\n arg_handles.append(narr.handle)\n arg_arrays = args\n elif isinstance(args, dict):\n for name in arg_names:\n if name in args:\n narr = args[name]\n if not isinstance(narr, NDArray):\n raise TypeError('Only accept list of NDArrays or dict of str to NDArray')\n arg_handles.append(narr.handle)\n arg_arrays.append(narr)\n else:\n if allow_missing:\n arg_handles.append(None)\n arg_arrays.append(None)\n else:\n raise ValueError('key `%s` is missing in `%s`' % (name, arg_key))\n else:\n raise TypeError('Only accept list of NDArrays or dict of str to NDArray')\n return c_array(NDArrayHandle, arg_handles), arg_arrays\n\n def _gen_atomic_symbol(self):\n handle = SymbolHandle()\n check_call(_LIB.MXGenAtomicSymbolFromSymbol(self.handle, ctypes.byref(handle)))\n return Symbol(handle)\n\n\n # pylint: disable=too-many-locals\n def optimize_for(self, backend, args=None, aux=None, ctx=None,\n shape_dict=None, type_dict=None, stype_dict=None, skip_infer=False, **kwargs):\n \"\"\"Partitions current symbol and optimizes it for a given backend,\n returns new partitioned symbol.\n\n Parameters\n ----------\n backend : str\n The name of backend, as registered in `SubgraphBackendRegistry`\n\n args : dict of str to NDArray, optional\n Input arguments to the symbol, required to infer shapes/types before partitioning\n - If type is a dict of str to `NDArray`, then it maps the name of 
arguments\n to the corresponding `NDArray`. Non defined arguments' `NDArray`s don't have to be\n specified in the dict.\n\n aux : dict of str to NDArray, optional\n Input auxiliary arguments to the symbol\n - If type is a dict of str to `NDArray`, then it maps the name of arguments\n to the corresponding `NDArray`.\n\n ctx : Context, optional\n Device context, used to infer stypes\n\n shape_dict : Dict of str->tuple, optional\n Input shape dictionary.\n Used iff input NDArray is not in `args`.\n\n type_dict : Dict of str->numpy.dtype, optional\n Input type dictionary.\n Used iff input NDArray is not in `args`.\n\n stype_dict : Dict of str->str, optional\n Input storage type dictionary.\n Used iff input NDArray is not in `args`.\n\n skip_infer : bool, optional\n If True, the optimization skips the shape, type and storage type inference pass.\n\n kwargs : optional arguments\n Passed on to `PrePartition` and `PostPartition` functions of `SubgraphProperty`\n\n Returns\n -------\n out : SymbolHandle\n The created symbol for target backend.\n \"\"\"\n out = SymbolHandle()\n assert isinstance(backend, str)\n assert isinstance(args, dict) or args is None\n assert isinstance(aux, dict) or aux is None\n\n if args is None or len(args) == 0:\n args_ = []\n args_handle = c_array(NDArrayHandle, [])\n else:\n args_handle, args_ = self._get_ndarray_inputs('args', args,\n self.list_arguments(), True)\n\n if aux is None or len(aux) == 0:\n aux_ = []\n aux_handle = c_array(NDArrayHandle, [])\n else:\n aux_handle, aux_ = self._get_ndarray_inputs('aux_states', aux,\n self.list_auxiliary_states(), True)\n if ctx is None:\n ctx = current_context()\n assert isinstance(ctx, Context)\n\n\n # parse input data shape dict\n num_input_shapes = 0\n input_shape_names = ctypes.POINTER(ctypes.c_char_p)()\n input_shape_data = ctypes.POINTER(mx_int64)()\n input_shape_idx = ctypes.POINTER(mx_uint)()\n if shape_dict is not None:\n input_shape_names = []\n input_shape_data = []\n input_shape_idx = [0]\n for k, v in shape_dict.items():\n if isinstance(v, (tuple, list)):\n input_shape_names.append(k)\n input_shape_data.extend(v)\n input_shape_idx.append(len(input_shape_data))\n else:\n raise ValueError(str(v) + \" has to be a tuple or list.\")\n num_input_shapes = mx_uint(len(input_shape_names))\n input_shape_names = c_str_array(input_shape_names)\n input_shape_data = c_array_buf(mx_int64, array('q', input_shape_data))\n input_shape_idx = c_array_buf(mx_uint, array('i', input_shape_idx))\n\n # parse input data types dict\n num_input_types = 0\n input_type_names = ctypes.POINTER(ctypes.c_char_p)() # provided type argument names\n input_type_data = ctypes.POINTER(mx_uint)() # provided types\n if type_dict is not None:\n input_type_names = []\n input_type_data = []\n for k, v in type_dict.items():\n v = _numpy.dtype(v).type\n if v in _DTYPE_NP_TO_MX:\n input_type_names.append(k)\n input_type_data.append(_DTYPE_NP_TO_MX[v])\n else:\n raise ValueError(str(v) + \" is not a MXNet type.\")\n\n num_input_types = mx_uint(len(input_type_names))\n input_type_names = c_str_array(input_type_names)\n input_type_data = c_array_buf(ctypes.c_int, array('i', input_type_data))\n\n # parse input data storage types dict\n num_input_stypes = 0\n # provided storage type argument names\n input_stype_names = ctypes.POINTER(ctypes.c_char_p)()\n input_stype_data = ctypes.POINTER(mx_uint)() # provided storage types\n if stype_dict is not None:\n input_stype_names = []\n input_stype_data = []\n for k, v in stype_dict.items():\n if v in 
_STORAGE_TYPE_STR_TO_ID:\n input_stype_names.append(k)\n input_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v])\n else:\n raise ValueError(str(v) + \" is not a MXNet storage type.\")\n\n num_input_stypes = mx_uint(len(input_stype_names))\n input_stype_names = c_str_array(input_stype_names)\n input_stype_data = c_array_buf(ctypes.c_int, array('i', input_stype_data))\n\n new_args_size = ctypes.c_uint()\n new_arg_names = ctypes.POINTER(ctypes.c_char_p)()\n new_args_handle = ctypes.POINTER(NDArrayHandle)()\n new_aux_size = ctypes.c_uint()\n new_aux_names = ctypes.POINTER(ctypes.c_char_p)()\n new_aux_handle = ctypes.POINTER(NDArrayHandle)()\n\n key_list = []\n val_list = []\n for key, val in kwargs.items():\n key_list.append(key)\n val_list.append(str(val))\n check_call(_LIB.MXOptimizeForBackend(self.handle,\n c_str(backend),\n ctypes.c_int(ctx.device_typeid),\n ctypes.byref(out),\n mx_uint(len(args_)),\n args_handle,\n mx_uint(len(aux_)),\n aux_handle,\n mx_uint(len(key_list)),\n c_str_array(key_list),\n c_str_array(val_list),\n num_input_shapes,\n input_shape_names,\n input_shape_data,\n input_shape_idx,\n num_input_types,\n input_type_names,\n input_type_data,\n num_input_stypes,\n input_stype_names,\n input_stype_data,\n ctypes.c_bool(skip_infer),\n ctypes.byref(new_args_size),\n ctypes.byref(new_args_handle),\n ctypes.byref(new_arg_names),\n ctypes.byref(new_aux_size),\n ctypes.byref(new_aux_handle),\n ctypes.byref(new_aux_names)))\n # add new args/aux\n if not args is None:\n for i in range(new_args_size.value):\n args[py_str(new_arg_names[i])] = NDArray(NDArrayHandle(new_args_handle[i]))\n elif new_args_size.value > 0:\n raise RuntimeError('Cannot add new args in optimize_for since args is None\\n' +\n 'Provide a dictionary to the args argument to optimize_for')\n\n if not aux is None:\n for i in range(new_aux_size.value):\n aux[py_str(new_aux_names[i])] = NDArray(NDArrayHandle(new_aux_handle[i]))\n elif new_aux_size.value > 0:\n raise RuntimeError('Cannot add new aux in optimize_for since aux is None\\n' +\n 'Provide a dictionary to the aux argument to optimize_for')\n\n new_sym = Symbol(out)\n\n arg_names = self.list_arguments()\n new_arg_names = new_sym.list_arguments()\n deleted_arg_names = set([item for item in arg_names\n if item not in set(new_arg_names)])\n\n if len(deleted_arg_names) > 0:\n if args is not None:\n for a_n in deleted_arg_names:\n if a_n in args:\n args.pop(a_n)\n else:\n warnings.warn('A param was deleted during optimization, but no args dictionary was provided.\\n' +\n 'Please ensure that your model weights match the newly optimized model.')\n\n aux_names = self.list_auxiliary_states()\n new_aux_names = new_sym.list_auxiliary_states()\n deleted_aux_names = set([item for item in aux_names\n if item not in set(new_aux_names)])\n if len(deleted_aux_names) > 0:\n if aux is not None:\n for a_n in deleted_aux_names:\n if a_n in aux:\n aux.pop(a_n)\n else:\n warnings.warn('A param was deleted during optimization, but no args dictionary was provided.\\n' +\n 'Please ensure that your model weights match the newly optimized model.')\n\n return new_sym\n\n # pylint: disable=too-many-locals\n def _simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,\n **kwargs):\n \"\"\"Bind current symbol to get an executor, allocate all the arguments needed.\n Allows specifying data types.\n\n This function simplifies the binding procedure. 
You need to specify only input data shapes.\n Before binding the executor, the function allocates arguments and auxiliary states\n that were not explicitly specified. Allows specifying data types.\n\n Example\n -------\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.FullyConnected(x, num_hidden=4)\n >>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req='null')\n >>> exe.forward()\n [<NDArray 5x4 @cpu(0)>]\n >>> exe.outputs[0].asnumpy()\n array([[ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.]], dtype=float32)\n >>> exe.arg_arrays\n [<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]\n >>> exe.grad_arrays\n [<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n grad_req: string\n {'write', 'add', 'null'}, or list of str or dict of str to str, optional\n To specify how we should update the gradient to the `args_grad`.\n\n - 'write' means every time gradient is written to specified `args_grad` NDArray.\n - 'add' means every time gradient is added to the specified NDArray.\n - 'null' means no action is taken, the gradient may not be calculated.\n\n type_dict : Dict of str->numpy.dtype\n Input type dictionary, name->dtype\n\n stype_dict : Dict of str->str\n Input storage type dictionary, name->storage_type\n\n kwargs : Dict of str->shape\n Input shape dictionary, name->shape\n\n Returns\n -------\n executor : mxnet.Executor\n The generated executor\n \"\"\"\n assert isinstance(grad_req, (str, dict))\n # infer shape\n arg_shapes, _, aux_shapes = self.infer_shape(**kwargs)\n type_dict = {} if type_dict is None else type_dict\n arg_dtypes, aux_dtypes = None, None\n try:\n arg_dtypes, _, aux_dtypes = self.infer_type(**type_dict)\n except Exception: # pylint: disable=broad-except\n pass\n args = [None] * len(arg_shapes) if arg_shapes else []\n aux_states = [None] * len(aux_shapes) if aux_shapes else []\n\n arg_names = self.list_arguments()\n aux_names = self.list_auxiliary_states()\n\n from ..ndarray import zeros as nd_zeros\n if arg_shapes:\n for i, shape in enumerate(arg_shapes):\n if arg_dtypes:\n args[i] = nd_zeros(shape, dtype=arg_dtypes[i])\n else:\n args[i] = nd_zeros(shape)\n if aux_shapes:\n for i, shape in enumerate(aux_shapes):\n if aux_dtypes:\n aux_states[i] = nd_zeros(shape, dtype=aux_dtypes[i])\n else:\n aux_states[i] = nd_zeros(shape)\n\n if stype_dict:\n for name, stype in stype_dict.items():\n if name in arg_names:\n index = arg_names.index(name)\n args[index] = args[index].tostype(stype)\n else:\n assert name in aux_names\n index = aux_names.index(name)\n aux_states[index] = aux_states[index].tostype(stype)\n\n if grad_req == 'null':\n args_grad = None\n elif isinstance(grad_req, dict):\n args_grad = {}\n for i, name in enumerate(arg_names):\n if grad_req[name] != 'null':\n args_grad[name] = args[i].copy()\n else:\n args_grad = [x.copy() for x in args]\n return Executor(self, ctx, args, args_grad, grad_req, aux_states)\n\n def _bind(self, ctx, args, args_grad=None, grad_req='write',\n aux_states=None):\n \"\"\"Binds the current symbol to an executor and returns it.\n\n We first declare the computation and then bind to the data to run.\n This function returns an executor which provides a `forward()` method for evaluation\n and an `outputs()` method to get all the results.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> c = a + b\n <Symbol _plus1>\n >>> ex = 
c._bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})\n >>> ex.forward()\n [<NDArray 2x3 @cpu(0)>]\n >>> ex.outputs[0].asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n args : list of NDArray or dict of str to NDArray\n Input arguments to the symbol.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_arguments()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of arguments\n to the corresponding `NDArray`.\n - In either case, all the arguments must be provided.\n\n args_grad : list of NDArray or dict of str to `NDArray`, optional\n When specified, `args_grad` provides NDArrays to hold\n the result of gradient value in backward.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_arguments()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of arguments\n to the corresponding NDArray.\n - When the type is a dict of str to `NDArray`, one only needs to provide the dict\n for the required argument gradients.\n Only the specified argument gradients will be calculated.\n\n grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional\n To specify how we should update the gradient to the `args_grad`.\n\n - 'write' means every time the gradient is written to the specified `args_grad` `NDArray`.\n - 'add' means every time the gradient is added to the specified NDArray.\n - 'null' means no action is taken, the gradient may not be calculated.\n\n aux_states : list of `NDArray`, or dict of str to `NDArray`, optional\n Input auxiliary states to the symbol, only needed when the output of\n `list_auxiliary_states()` is not empty.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_auxiliary_states()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of\n `auxiliary_states` to the corresponding `NDArray`,\n - In either case, all the auxiliary states need to be provided.\n\n Returns\n -------\n executor : Executor\n The generated executor\n\n Notes\n -----\n Auxiliary states are the special states of symbols that do not correspond\n to an argument, and do not have gradients, but are still useful\n for the specific operations. Common examples of auxiliary states include\n the `moving_mean` and `moving_variance` states in `BatchNorm`.\n Most operators do not have auxiliary states and in those cases,\n this parameter can be safely ignored.\n\n One can give up gradients by using a dict in `args_grad` and only specifying\n the gradients they are interested in.\n \"\"\"\n assert isinstance(grad_req, (str, dict))\n return Executor(self, ctx, args, args_grad, grad_req, aux_states)\n\n def gradient(self, wrt):\n \"\"\"Gets the autodiff of current symbol.\n\n This function can only be used if current symbol is a loss function.\n\n .. 
note:: This function is currently not implemented.\n\n Parameters\n ----------\n wrt : Array of String\n keyword arguments of the symbol that the gradients are taken.\n\n Returns\n -------\n grad : Symbol\n A gradient Symbol with returns to be the corresponding gradients.\n \"\"\"\n handle = SymbolHandle()\n c_wrt = c_str_array(wrt)\n check_call(_LIB.MXSymbolGrad(self.handle,\n mx_uint(len(wrt)),\n c_wrt,\n ctypes.byref(handle)))\n return Symbol(handle)\n\n # pylint: enable= no-member\n\n def eval(self, ctx=None, **kwargs):\n \"\"\"Evaluates a symbol given arguments.\n\n The `eval` method combines a call to `bind` (which returns an executor)\n with a call to `forward` (executor method).\n For the common use case, where you might repeatedly evaluate with same arguments,\n eval is slow.\n In that case, you should call `bind` once and then repeatedly call forward.\n This function allows simpler syntax for less cumbersome introspection.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> c = a + b\n >>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))\n >>> ex\n [<NDArray 2x3 @cpu(0)>]\n >>> ex[0].asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n kwargs : Keyword arguments of type `NDArray`\n Input arguments to the symbol. All the arguments must be provided.\n\n Returns\n ----------\n result : a list of NDArrays corresponding to the values taken by each symbol when\n evaluated on given args. When called on a single symbol (not a group),\n the result will be a list with one element.\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return self._bind(ctx, kwargs).forward()\n\n def reshape(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reshape`.\n\n The arguments are the same as for :py:func:`reshape`, with\n this array as data.\n \"\"\"\n return op.reshape(self, *args, **kwargs)\n\n def reshape_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reshape_like`.\n\n The arguments are the same as for :py:func:`reshape_like`, with\n this array as data.\n \"\"\"\n return op.reshape_like(self, *args, **kwargs)\n\n def astype(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cast`.\n\n The arguments are the same as for :py:func:`cast`, with\n this array as data.\n \"\"\"\n return op.cast(self, *args, **kwargs)\n\n def zeros_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`zeros_like`.\n\n The arguments are the same as for :py:func:`zeros_like`, with\n this array as data.\n \"\"\"\n return op.zeros_like(self, *args, **kwargs)\n\n def ones_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ones_like`.\n\n The arguments are the same as for :py:func:`ones_like`, with\n this array as data.\n \"\"\"\n return op.ones_like(self, *args, **kwargs)\n\n def broadcast_axes(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`broadcast_axes`.\n\n The arguments are the same as for :py:func:`broadcast_axes`, with\n this array as data.\n \"\"\"\n return op.broadcast_axes(self, *args, **kwargs)\n\n def repeat(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`repeat`.\n\n The arguments are the same as for :py:func:`repeat`, with\n this array as data.\n \"\"\"\n return op.repeat(self, *args, **kwargs)\n\n def pad(self, *args, **kwargs):\n \"\"\"Convenience fluent method for 
:py:func:`pad`.\n\n The arguments are the same as for :py:func:`pad`, with\n this array as data.\n \"\"\"\n return op.pad(self, *args, **kwargs)\n\n def swapaxes(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`swapaxes`.\n\n The arguments are the same as for :py:func:`swapaxes`, with\n this array as data.\n \"\"\"\n return op.swapaxes(self, *args, **kwargs)\n\n def split(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split`.\n\n The arguments are the same as for :py:func:`split`, with\n this array as data.\n \"\"\"\n return op.split(self, *args, **kwargs)\n\n def split_v2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split_v2`.\n\n The arguments are the same as for :py:func:`split_v2`, with\n this array as data.\n \"\"\"\n return split_v2(self, *args, **kwargs)\n\n def slice(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice`.\n\n The arguments are the same as for :py:func:`slice`, with\n this array as data.\n \"\"\"\n return op.slice(self, *args, **kwargs)\n\n def slice_axis(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_axis`.\n\n The arguments are the same as for :py:func:`slice_axis`, with\n this array as data.\n \"\"\"\n return op.slice_axis(self, *args, **kwargs)\n\n def slice_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_like`.\n\n The arguments are the same as for :py:func:`slice_like`, with\n this array as data.\n \"\"\"\n return op.slice_like(self, *args, **kwargs)\n\n def take(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`take`.\n\n The arguments are the same as for :py:func:`take`, with\n this array as data.\n \"\"\"\n return op.take(self, *args, **kwargs)\n\n def one_hot(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`one_hot`.\n\n The arguments are the same as for :py:func:`one_hot`, with\n this array as data.\n \"\"\"\n return op.one_hot(self, *args, **kwargs)\n\n def pick(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`pick`.\n\n The arguments are the same as for :py:func:`pick`, with\n this array as data.\n \"\"\"\n return op.pick(self, *args, **kwargs)\n\n def sort(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sort`.\n\n The arguments are the same as for :py:func:`sort`, with\n this array as data.\n \"\"\"\n return op.sort(self, *args, **kwargs)\n\n def topk(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`topk`.\n\n The arguments are the same as for :py:func:`topk`, with\n this array as data.\n \"\"\"\n return op.topk(self, *args, **kwargs)\n\n def argsort(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argsort`.\n\n The arguments are the same as for :py:func:`argsort`, with\n this array as data.\n \"\"\"\n return op.argsort(self, *args, **kwargs)\n\n def argmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argmax`.\n\n The arguments are the same as for :py:func:`argmax`, with\n this array as data.\n \"\"\"\n return op.argmax(self, *args, **kwargs)\n\n def argmax_channel(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argmax_channel`.\n\n The arguments are the same as for :py:func:`argmax_channel`, with\n this array as data.\n \"\"\"\n return op.argmax_channel(self, *args, **kwargs)\n\n def argmin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argmin`.\n\n The arguments are the same as 
for :py:func:`argmin`, with\n this array as data.\n \"\"\"\n return op.argmin(self, *args, **kwargs)\n\n def clip(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`clip`.\n\n The arguments are the same as for :py:func:`clip`, with\n this array as data.\n \"\"\"\n return op.clip(self, *args, **kwargs)\n\n def abs(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`abs`.\n\n The arguments are the same as for :py:func:`abs`, with\n this array as data.\n \"\"\"\n return op.abs(self, *args, **kwargs)\n\n def sign(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sign`.\n\n The arguments are the same as for :py:func:`sign`, with\n this array as data.\n \"\"\"\n return op.sign(self, *args, **kwargs)\n\n def flatten(self, inplace=False, **kwargs): # pylint: disable=unused-argument\n \"\"\"Convenience fluent method for :py:func:`flatten`.\n\n The arguments are the same as for :py:func:`flatten`, with\n this array as data.\n \"\"\"\n return op.flatten(self, **kwargs)\n\n def shape_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`shape_array`.\n\n The arguments are the same as for :py:func:`shape_op`, with\n this array as data.\n \"\"\"\n return op.shape_array(self, *args, **kwargs)\n\n def size_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`size_array`.\n\n The arguments are the same as for :py:func:`size_array`, with\n this array as data.\n \"\"\"\n return op.size_array(self, *args, **kwargs)\n\n def expand_dims(self, axis, inplace=False, **kwargs): # pylint: disable=unused-argument\n \"\"\"Convenience fluent method for :py:func:`expand_dims`.\n\n The arguments are the same as for :py:func:`expand_dims`, with\n this array as data.\n \"\"\"\n return op.expand_dims(self, axis=axis, **kwargs)\n\n def broadcast_to(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`broadcast_to`.\n\n The arguments are the same as for :py:func:`broadcast_to`, with\n this array as data.\n \"\"\"\n return op.broadcast_to(self, *args, **kwargs)\n\n def broadcast_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`broadcast_like`.\n\n The arguments are the same as for :py:func:`broadcast_like`, with\n this array as data.\n \"\"\"\n return op.broadcast_like(self, *args, **kwargs)\n\n def tile(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tile`.\n\n The arguments are the same as for :py:func:`tile`, with\n this array as data.\n \"\"\"\n return op.tile(self, *args, **kwargs)\n\n def transpose(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`transpose`.\n\n The arguments are the same as for :py:func:`transpose`, with\n this array as data.\n \"\"\"\n return op.transpose(self, *args, **kwargs)\n\n def flip(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`flip`.\n\n The arguments are the same as for :py:func:`flip`, with\n this array as data.\n \"\"\"\n return op.flip(self, *args, **kwargs)\n\n def depth_to_space(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`depth_to_space`.\n\n The arguments are the same as for :py:func:`depth_to_space`, with\n this array as data.\n \"\"\"\n return op.depth_to_space(self, *args, **kwargs)\n\n def space_to_depth(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`space_to_depth`.\n\n The arguments are the same as for :py:func:`space_to_depth`, with\n this array as data.\n \"\"\"\n return op.space_to_depth(self, 
*args, **kwargs)\n\n def diag(self, k=0, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`diag`.\n\n The arguments are the same as for :py:func:`diag`, with\n this array as data.\n \"\"\"\n return op.diag(self, k, **kwargs)\n\n def sum(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sum`.\n\n The arguments are the same as for :py:func:`sum`, with\n this array as data.\n \"\"\"\n return op.sum(self, *args, **kwargs)\n\n def nansum(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nansum`.\n\n The arguments are the same as for :py:func:`nansum`, with\n this array as data.\n \"\"\"\n return op.nansum(self, *args, **kwargs)\n\n def prod(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`prod`.\n\n The arguments are the same as for :py:func:`prod`, with\n this array as data.\n \"\"\"\n return op.prod(self, *args, **kwargs)\n\n def nanprod(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nanprod`.\n\n The arguments are the same as for :py:func:`nanprod`, with\n this array as data.\n \"\"\"\n return op.nanprod(self, *args, **kwargs)\n\n def mean(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`mean`.\n\n The arguments are the same as for :py:func:`mean`, with\n this array as data.\n \"\"\"\n return op.mean(self, *args, **kwargs)\n\n def max(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`max`.\n\n The arguments are the same as for :py:func:`max`, with\n this array as data.\n \"\"\"\n return op.max(self, *args, **kwargs)\n\n def min(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`min`.\n\n The arguments are the same as for :py:func:`min`, with\n this array as data.\n \"\"\"\n return op.min(self, *args, **kwargs)\n\n def norm(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`norm`.\n\n The arguments are the same as for :py:func:`norm`, with\n this array as data.\n \"\"\"\n return op.norm(self, *args, **kwargs)\n\n def round(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`round`.\n\n The arguments are the same as for :py:func:`round`, with\n this array as data.\n \"\"\"\n return op.round(self, *args, **kwargs)\n\n def rint(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rint`.\n\n The arguments are the same as for :py:func:`rint`, with\n this array as data.\n \"\"\"\n return op.rint(self, *args, **kwargs)\n\n def fix(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`fix`.\n\n The arguments are the same as for :py:func:`fix`, with\n this array as data.\n \"\"\"\n return op.fix(self, *args, **kwargs)\n\n def floor(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`floor`.\n\n The arguments are the same as for :py:func:`floor`, with\n this array as data.\n \"\"\"\n return op.floor(self, *args, **kwargs)\n\n def ceil(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ceil`.\n\n The arguments are the same as for :py:func:`ceil`, with\n this array as data.\n \"\"\"\n return op.ceil(self, *args, **kwargs)\n\n def trunc(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`trunc`.\n\n The arguments are the same as for :py:func:`trunc`, with\n this array as data.\n \"\"\"\n return op.trunc(self, *args, **kwargs)\n\n def sin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sin`.\n\n The arguments are the same as for :py:func:`sin`, with\n this array as data.\n \"\"\"\n 
return op.sin(self, *args, **kwargs)\n\n def cos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cos`.\n\n The arguments are the same as for :py:func:`cos`, with\n this array as data.\n \"\"\"\n return op.cos(self, *args, **kwargs)\n\n def tan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tan`.\n\n The arguments are the same as for :py:func:`tan`, with\n this array as data.\n \"\"\"\n return op.tan(self, *args, **kwargs)\n\n def arcsin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsin`.\n\n The arguments are the same as for :py:func:`arcsin`, with\n this array as data.\n \"\"\"\n return op.arcsin(self, *args, **kwargs)\n\n def arccos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccos`.\n\n The arguments are the same as for :py:func:`arccos`, with\n this array as data.\n \"\"\"\n return op.arccos(self, *args, **kwargs)\n\n def arctan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctan`.\n\n The arguments are the same as for :py:func:`arctan`, with\n this array as data.\n \"\"\"\n return op.arctan(self, *args, **kwargs)\n\n def degrees(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`degrees`.\n\n The arguments are the same as for :py:func:`degrees`, with\n this array as data.\n \"\"\"\n return op.degrees(self, *args, **kwargs)\n\n def radians(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`radians`.\n\n The arguments are the same as for :py:func:`radians`, with\n this array as data.\n \"\"\"\n return op.radians(self, *args, **kwargs)\n\n def sinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sinh`.\n\n The arguments are the same as for :py:func:`sinh`, with\n this array as data.\n \"\"\"\n return op.sinh(self, *args, **kwargs)\n\n def cosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cosh`.\n\n The arguments are the same as for :py:func:`cosh`, with\n this array as data.\n \"\"\"\n return op.cosh(self, *args, **kwargs)\n\n def tanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tanh`.\n\n The arguments are the same as for :py:func:`tanh`, with\n this array as data.\n \"\"\"\n return op.tanh(self, *args, **kwargs)\n\n def arcsinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsinh`.\n\n The arguments are the same as for :py:func:`arcsinh`, with\n this array as data.\n \"\"\"\n return op.arcsinh(self, *args, **kwargs)\n\n def arccosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccosh`.\n\n The arguments are the same as for :py:func:`arccosh`, with\n this array as data.\n \"\"\"\n return op.arccosh(self, *args, **kwargs)\n\n def arctanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctanh`.\n\n The arguments are the same as for :py:func:`arctanh`, with\n this array as data.\n \"\"\"\n return op.arctanh(self, *args, **kwargs)\n\n def exp(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`exp`.\n\n The arguments are the same as for :py:func:`exp`, with\n this array as data.\n \"\"\"\n return op.exp(self, *args, **kwargs)\n\n def expm1(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`expm1`.\n\n The arguments are the same as for :py:func:`expm1`, with\n this array as data.\n \"\"\"\n return op.expm1(self, *args, **kwargs)\n\n def log(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log`.\n\n 
The arguments are the same as for :py:func:`log`, with\n this array as data.\n \"\"\"\n return op.log(self, *args, **kwargs)\n\n def log10(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log10`.\n\n The arguments are the same as for :py:func:`log10`, with\n this array as data.\n \"\"\"\n return op.log10(self, *args, **kwargs)\n\n def log2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log2`.\n\n The arguments are the same as for :py:func:`log2`, with\n this array as data.\n \"\"\"\n return op.log2(self, *args, **kwargs)\n\n def log1p(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log1p`.\n\n The arguments are the same as for :py:func:`log1p`, with\n this array as data.\n \"\"\"\n return op.log1p(self, *args, **kwargs)\n\n def sqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sqrt`.\n\n The arguments are the same as for :py:func:`sqrt`, with\n this array as data.\n \"\"\"\n return op.sqrt(self, *args, **kwargs)\n\n def rsqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rsqrt`.\n\n The arguments are the same as for :py:func:`rsqrt`, with\n this array as data.\n \"\"\"\n return op.rsqrt(self, *args, **kwargs)\n\n def cbrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cbrt`.\n\n The arguments are the same as for :py:func:`cbrt`, with\n this array as data.\n \"\"\"\n return op.cbrt(self, *args, **kwargs)\n\n def rcbrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rcbrt`.\n\n The arguments are the same as for :py:func:`rcbrt`, with\n this array as data.\n \"\"\"\n return op.rcbrt(self, *args, **kwargs)\n\n def square(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`square`.\n\n The arguments are the same as for :py:func:`square`, with\n this array as data.\n \"\"\"\n return op.square(self, *args, **kwargs)\n\n def reciprocal(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reciprocal`.\n\n The arguments are the same as for :py:func:`reciprocal`, with\n this array as data.\n \"\"\"\n return op.reciprocal(self, *args, **kwargs)\n\n def relu(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`relu`.\n\n The arguments are the same as for :py:func:`relu`, with\n this array as data.\n \"\"\"\n return op.relu(self, *args, **kwargs)\n\n def sigmoid(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sigmoid`.\n\n The arguments are the same as for :py:func:`sigmoid`, with\n this array as data.\n \"\"\"\n return op.sigmoid(self, *args, **kwargs)\n\n def softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmax`.\n\n The arguments are the same as for :py:func:`softmax`, with\n this array as data.\n \"\"\"\n return op.softmax(self, *args, **kwargs)\n\n def log_softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log_softmax`.\n\n The arguments are the same as for :py:func:`log_softmax`, with\n this array as data.\n \"\"\"\n return op.log_softmax(self, *args, **kwargs)\n\n def softmin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmin`.\n\n The arguments are the same as for :py:func:`softmin`, with\n this array as data.\n \"\"\"\n return op.softmin(self, *args, **kwargs)\n\n def squeeze(self, axis=None, inplace=False, **kwargs): # pylint: disable=unused-argument\n \"\"\"Convenience fluent method for :py:func:`squeeze`.\n\n The arguments are the same as for 
:py:func:`squeeze`, with\n this array as data.\n \"\"\"\n return op.squeeze(self, axis=axis, **kwargs)\n\n def get_backend_symbol(self, backend):\n \"\"\"Return symbol for target backend.\n\n Parameters\n ----------\n backend : str\n The backend names.\n\n Returns\n -------\n out : Symbol\n The created Symbol for target backend.\n \"\"\"\n out = SymbolHandle()\n check_call(_LIB.MXGenBackendSubgraph(self.handle, c_str(backend), ctypes.byref(out)))\n return Symbol(out)\n\n def wait_to_read(self):\n raise NotImplementedForSymbol(self.wait_to_read, None)\n\n def asnumpy(self):\n raise NotImplementedForSymbol(self.asnumpy, None)\n\n def asscalar(self):\n raise NotImplementedForSymbol(self.asscalar, None)\n\n def copy(self):\n raise NotImplementedForSymbol(self.copy, None)\n\n def as_in_context(self):\n raise NotImplementedForSymbol(self.as_in_context, None)\n\n def detach(self):\n raise NotImplementedForSymbol(self.detach, None)\n\n def backward(self):\n raise NotImplementedForSymbol(self.backward, None)\n\ndef var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,\n init=None, stype=None, profiler_scope=None, **kwargs):\n \"\"\"Creates a symbolic variable with specified name.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'a': 'b'})\n >>> data\n <Symbol data>\n >>> csr_data = mx.sym.Variable('csr_data', stype='csr')\n >>> csr_data\n <Symbol csr_data>\n >>> row_sparse_weight = mx.sym.Variable('weight', stype='row_sparse')\n >>> row_sparse_weight\n <Symbol weight>\n\n Parameters\n ----------\n name : str\n Variable name.\n attr : Dict of strings\n Additional attributes to set on the variable. Format {string : string}.\n shape : tuple\n The shape of a variable. If specified, this will be used during the shape inference.\n If one has specified a different shape for this variable using\n a keyword argument when calling shape inference, this shape information will be ignored.\n lr_mult : float\n The learning rate multiplier for input variable.\n wd_mult : float\n Weight decay multiplier for input variable.\n dtype : str or numpy.dtype\n The dtype for input variable. 
If not specified, this value will be inferred.\n init : initializer (mxnet.init.*)\n Initializer for this variable to (optionally) override the default initializer.\n stype : str\n The storage type of the variable, such as 'row_sparse', 'csr', 'default', etc\n profiler_scope : str\n The profiler scope for input variable.\n kwargs : Additional attribute variables\n Additional attributes must start and end with double underscores.\n\n Returns\n -------\n variable : Symbol\n A symbol corresponding to an input to the computation graph.\n \"\"\"\n if not isinstance(name, string_types):\n raise TypeError('Expect a string for variable `name`')\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))\n ret = Symbol(handle)\n attr = attribute.current().get(attr)\n attr = {} if attr is None else attr\n if shape is not None:\n attr['__shape__'] = str(shape)\n if lr_mult is not None:\n attr['__lr_mult__'] = str(lr_mult)\n if wd_mult is not None:\n attr['__wd_mult__'] = str(wd_mult)\n if dtype is not None:\n np_dtype = _numpy.dtype(dtype)\n if np_dtype == _numpy.dtype([('bfloat16', _numpy.uint16)]):\n attr['__dtype__'] = str(_DTYPE_NP_TO_MX[np_dtype])\n else:\n attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])\n if init is not None:\n if not isinstance(init, string_types):\n init = init.dumps()\n attr['__init__'] = init\n if stype is not None:\n attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])\n if profiler_scope is not None:\n attr['__profiler_scope__'] = profiler_scope\n else:\n attr['__profiler_scope__'] = _profiler_scope.get()\n for k, v in kwargs.items():\n if k.startswith('__') and k.endswith('__'):\n attr[k] = str(v)\n else:\n raise ValueError('Attribute name=%s is not supported.'\n ' Additional attributes must start and end with double underscores,'\n ' e.g, __yourattr__' % k)\n ret._set_attr(**attr)\n return ret\n\n\n# for back compatibility\nVariable = var\n\n\ndef Group(symbols, create_fn=Symbol):\n \"\"\"Creates a symbol that contains a collection of other symbols, grouped together.\n A classic symbol (`mx.sym.Symbol`) will be returned if all the symbols in the list\n are of that type; a numpy symbol (`mx.sym.np._Symbol`) will be returned if all the\n symbols in the list are of that type. 
A type error will be raised if a list of mixed\n classic and numpy symbols are provided.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> mx.sym.Group([a,b])\n <Symbol Grouped>\n\n Parameters\n ----------\n symbols : list\n List of symbols to be grouped.\n\n create_fn : mx.sym.Symbol or mx.sym.np._Symbol\n Symbol class for creating the grouped symbol.\n\n Returns\n -------\n sym : Symbol\n A group symbol.\n \"\"\"\n if not symbols or any(not isinstance(sym, Symbol) for sym in symbols):\n raise TypeError('Expected a list of symbols as input')\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateGroup(\n mx_uint(len(symbols)),\n c_handle_array(symbols), ctypes.byref(handle)))\n return create_fn(handle)\n\n\ndef load(fname):\n \"\"\"Loads symbol from a JSON file.\n\n You can also use pickle to do the job if you only work on python.\n The advantage of load/save is the file is language agnostic.\n This means the file saved using save can be loaded by other language binding of mxnet.\n You also get the benefit being able to directly load/save from cloud storage(S3, HDFS).\n\n Parameters\n ----------\n fname : str\n The name of the file, examples:\n\n - `s3://my-bucket/path/my-s3-symbol`\n - `hdfs://my-bucket/path/my-hdfs-symbol`\n - `/path-to/my-local-symbol`\n\n Returns\n -------\n sym : Symbol\n The loaded symbol.\n\n See Also\n --------\n Symbol.save : Used to save symbol into file.\n \"\"\"\n if not isinstance(fname, string_types):\n raise TypeError('fname need to be string')\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))\n return Symbol(handle)\n\n\ndef load_json(json_str):\n \"\"\"Loads symbol from json string.\n\n Parameters\n ----------\n json_str : str\n A JSON string.\n\n Returns\n -------\n sym : Symbol\n The loaded symbol.\n\n See Also\n --------\n Symbol.tojson : Used to save symbol into json string.\n \"\"\"\n if not isinstance(json_str, string_types):\n raise TypeError('fname required to be string')\n handle = SymbolHandle()\n check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))\n return Symbol(handle)\n\n\n# pylint: disable=no-member\n# pylint: disable=redefined-builtin\ndef pow(base, exp):\n \"\"\"Returns element-wise result of base element raised to powers from exp element.\n\n Both inputs can be Symbol or scalar number.\n Broadcasting is not supported. 
Use `broadcast_pow` instead.\n\n `sym.pow` is being deprecated, please use `sym.power` instead.\n\n Parameters\n ---------\n base : Symbol or scalar\n The base symbol\n exp : Symbol or scalar\n The exponent symbol\n\n Returns\n -------\n Symbol or scalar\n The bases in x raised to the exponents in y.\n\n Examples\n --------\n >>> mx.sym.pow(2, 3)\n 8\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.pow(x, 2)\n >>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()\n array([ 1., 4.], dtype=float32)\n >>> z = mx.sym.pow(3, y)\n >>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 27.], dtype=float32)\n >>> z = mx.sym.pow(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 64.], dtype=float32)\n \"\"\"\n if isinstance(base, Symbol) and isinstance(exp, Symbol):\n return _internal._Power(base, exp)\n if isinstance(base, Symbol) and isinstance(exp, Number):\n return _internal._PowerScalar(base, scalar=exp)\n if isinstance(base, Number) and isinstance(exp, Symbol):\n return _internal._RPowerScalar(exp, scalar=base)\n if isinstance(base, Number) and isinstance(exp, Number):\n return base**exp\n else:\n raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))\n\n\ndef power(base, exp):\n \"\"\"Returns element-wise result of base element raised to powers from exp element.\n\n Both inputs can be Symbol or scalar number.\n Broadcasting is not supported. Use `broadcast_pow` instead.\n\n Parameters\n ---------\n base : Symbol or scalar\n The base symbol\n exp : Symbol or scalar\n The exponent symbol\n\n Returns\n -------\n Symbol or scalar\n The bases in x raised to the exponents in y.\n\n Examples\n --------\n >>> mx.sym.power(2, 3)\n 8\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.power(x, 2)\n >>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()\n array([ 1., 4.], dtype=float32)\n >>> z = mx.sym.power(3, y)\n >>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 27.], dtype=float32)\n >>> z = mx.sym.power(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 64.], dtype=float32)\n \"\"\"\n return pow(base, exp)\n\n\n# pylint: disable=no-member\n# pylint: disable=redefined-builtin\ndef maximum(left, right):\n \"\"\"Returns element-wise maximum of the input elements.\n\n Both inputs can be Symbol or scalar number. 
Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First symbol to be compared.\n right : Symbol or scalar\n Second symbol to be compared.\n\n Returns\n -------\n Symbol or scalar\n The element-wise maximum of the input symbols.\n\n Examples\n --------\n >>> mx.sym.maximum(2, 3.5)\n 3.5\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.maximum(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()\n array([ 4., 5., 4., 10.], dtype=float32)\n >>> z = mx.sym.maximum(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 10., 4.], dtype=float32)\n \"\"\"\n if isinstance(left, Symbol) and isinstance(right, Symbol):\n return _internal._Maximum(left, right)\n if isinstance(left, Symbol) and isinstance(right, Number):\n return _internal._MaximumScalar(left, scalar=right)\n if isinstance(left, Number) and isinstance(right, Symbol):\n return _internal._MaximumScalar(right, scalar=left)\n if isinstance(left, Number) and isinstance(right, Number):\n return left if left > right else right\n else:\n raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))\n\n\n# pylint: disable=no-member\n# pylint: disable=redefined-builtin\ndef minimum(left, right):\n \"\"\"Returns element-wise minimum of the input elements.\n\n Both inputs can be Symbol or scalar number. Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First symbol to be compared.\n right : Symbol or scalar\n Second symbol to be compared.\n\n Returns\n -------\n Symbol or scalar\n The element-wise minimum of the input symbols.\n\n Examples\n --------\n >>> mx.sym.minimum(2, 3.5)\n 2\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.minimum(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()\n array([ 3., 4., 2., 4.], dtype=float32)\n >>> z = mx.sym.minimum(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 3., 2.], dtype=float32)\n \"\"\"\n if isinstance(left, Symbol) and isinstance(right, Symbol):\n return _internal._Minimum(left, right)\n if isinstance(left, Symbol) and isinstance(right, Number):\n return _internal._MinimumScalar(left, scalar=right)\n if isinstance(left, Number) and isinstance(right, Symbol):\n return _internal._MinimumScalar(right, scalar=left)\n if isinstance(left, Number) and isinstance(right, Number):\n return left if left < right else right\n else:\n raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))\n\n\n# pylint: disable=no-member\n# pylint: disable=redefined-builtin\ndef hypot(left, right):\n \"\"\"Given the \"legs\" of a right triangle, returns its hypotenuse.\n\n Equivalent to :math:`\\\\sqrt(left^2 + right^2)`, element-wise.\n Both inputs can be Symbol or scalar number. 
Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First leg of the triangle(s).\n right : Symbol or scalar\n Second leg of the triangle(s).\n\n Returns\n -------\n Symbol or scalar\n The hypotenuse of the triangle(s)\n\n Examples\n --------\n >>> mx.sym.hypot(3, 4)\n 5.0\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.hypot(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2]))[0].asnumpy()\n array([ 5., 6.40312433, 4.47213602], dtype=float32)\n >>> z = mx.sym.hypot(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 10.44030666, 4.47213602], dtype=float32)\n \"\"\"\n if isinstance(left, Symbol) and isinstance(right, Symbol):\n return _internal._Hypot(left, right)\n if isinstance(left, Symbol) and isinstance(right, Number):\n return _internal._HypotScalar(left, scalar=right)\n if isinstance(left, Number) and isinstance(right, Symbol):\n return _internal._HypotScalar(right, scalar=left)\n if isinstance(left, Number) and isinstance(right, Number):\n return _numpy.hypot(left, right)\n else:\n raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))\n\n\ndef eye(N, M=0, k=0, dtype=None, **kwargs):\n \"\"\"Returns a new symbol of 2-D shape, filled with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N: int\n Number of rows in the output.\n M: int, optional\n Number of columns in the output. If 0, defaults to N.\n k: int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol.\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._eye(N, M, k, dtype=dtype, **kwargs)\n\ndef zeros(shape, dtype=None, **kwargs):\n \"\"\"Returns a new symbol of given shape and type, filled with zeros.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol.\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._zeros(shape=shape, dtype=dtype, **kwargs)\n\n\ndef ones(shape, dtype=None, **kwargs):\n \"\"\"Returns a new symbol of given shape and type, filled with ones.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._ones(shape=shape, dtype=dtype, **kwargs)\n\n\ndef full(shape, val, dtype=None, **kwargs):\n \"\"\"Returns a new array of given shape and type, filled with the given value `val`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n val : scalar\n Fill value.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._full(shape=shape, dtype=dtype, value=float(val), **kwargs)\n\n# pylint: disable=redefined-outer-name\ndef arange(start, stop=None, step=1.0, repeat=1, infer_range=False, 
name=None, dtype=None):\n \"\"\"Returns evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [`start`, `stop`). In other\n words, the interval includes `start` but excludes `stop`. The function is\n similar to the built-in Python function `range` and to `numpy.arange`,\n but returns a `Symbol`.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default start value is 0.\n stop : number\n End of interval. The interval does not include this value.\n step : number, optional\n Spacing between values.\n repeat : int, optional\n \"The repeating time of all elements.\n E.g repeat=3, the element a will be repeated three times --> a, a, a.\n infer_range : boolean, optional\n When set to True, infer the stop position from the start, step,\n repeat, and output tensor size.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,\n infer_range=infer_range, name=name, dtype=dtype)\n\ndef linspace(start, stop, num, endpoint=True, name=None, dtype=None):\n \"\"\"Return evenly spaced numbers within a specified interval.\n\n Values are generated within the half-open interval [`start`, `stop`) or\n closed interval [start, stop] depending on whether `endpoint` is True or\n False. The function is similar to `numpy.linspace`, but returns a `Symbol`.\n\n Parameters\n ----------\n start : number\n Start of interval.\n stop : number\n End of interval, unless endpoint is set to False. In that case,\n the sequence consists of all but the last of `num + 1` evenly spaced\n samples, so that stop is excluded. Note that the step size changes\n when endpoint is False.\n num : number\n Number of samples to generate. Must be non-negative.\n endpoint : bool\n If True, stop is the last sample. Otherwise, it is not included.\n The default is True.\n ctx : Context, optional\n Device context. Default context is the current default context.\n dtype : str or numpy.dtype, optional\n The data type of the `NDArray`. The default datatype is `np.float32`.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n if dtype is None:\n dtype = _numpy.float32\n return _internal._linspace(start=start, stop=stop, num=num, endpoint=endpoint,\n name=name, dtype=dtype)\n\ndef histogram(a, bins=10, range=None, **kwargs):\n \"\"\"Compute the histogram of the input data.\n\n Parameters\n ----------\n a : NDArray\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars\n If bins is an int, it defines the number of equal-width bins in the\n given range (10, by default). If bins is a sequence, it defines the bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n range : (float, float), required if bins is an integer\n The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()).\n Values outside the range are ignored. The first element of the range must be less than or\n equal to the second. 
range affects the automatic bin computation as well, the range will\n be equally divided by the number of bins.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n if isinstance(bins, Symbol):\n return _internal._histogram(data=a, bins=bins, **kwargs)\n elif isinstance(bins, integer_types):\n if range is None:\n raise ValueError(\"null range is not supported in symbol mode\")\n return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)\n raise ValueError(\"bins argument should be either an integer or an NDArray\")\n\ndef split_v2(ary, indices_or_sections, axis=0, squeeze_axis=False):\n \"\"\"Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : NDArray\n Array to be divided into sub-arrays.\n indices_or_sections : int or tuple of ints\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n squeeze_axis: boolean, optional\n Whether to squeeze the axis of sub-arrays or not, only useful when size\n of the sub-arrays are 1 on the `axis`. Default is False.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n \"\"\"\n indices = []\n sections = 0\n if isinstance(indices_or_sections, int):\n sections = indices_or_sections\n elif isinstance(indices_or_sections, tuple):\n indices = [0] + list(indices_or_sections)\n else:\n raise ValueError('indices_or_sections must either int or tuple of ints')\n return _internal._split_v2(ary, indices, axis, squeeze_axis, sections)\n\n_set_symbol_class(Symbol)\n"
]
| [
[
"numpy.dtype",
"numpy.hypot"
]
]
|
ReReMLclub/cord19utils | [
"2392832ddb7e862a9e475da631f30ca614230fa3"
]
| [
"cord19utils/visualizations.py"
]
| [
"import networkx as nx\nimport holoviews as hv\nimport pandas as pd\nfrom holoviews import opts, dim\nhv.extension('bokeh')\n\ndef drawChordGraph(sgraph, id2label, nodeOfInterest = False):\n nodeData = {\n 'id' : [],\n 'name' : []\n }\n\n edgeData = {\n 'source' : [],\n 'target' : [],\n 'value' : []\n }\n \n if nodeOfInterest:\n nodesToKeep = [nodeOfInterest] + [node for node in sgraph.predecessors(nodeOfInterest)] + [node for node in sgraph.successors(nodeOfInterest)]\n sgraph = sgraph.subgraph(nodesToKeep)\n\n for node in sgraph.nodes():\n nodeData['id'].append(node)\n nodeData['name'].append(id2label[node])\n\n for source, target in sgraph.edges():\n value = sgraph.edges()[(source, target)]['weight']\n edgeData['source'].append(source)\n edgeData['target'].append(target)\n edgeData['value'].append(value)\n\n nodeDF = pd.DataFrame(nodeData)\n edgeDF = pd.DataFrame(edgeData)\n\n chord = hv.Chord((edgeDF, hv.Dataset(pd.DataFrame(nodeDF), 'id'))).select(value=(5, None))\n chord.opts(\n opts.Chord(cmap='Category20', edge_cmap='Category20', edge_color=dim('source').str(), \n labels='name', node_color=dim('id').str()))\n\n return chord"
]
| [
[
"pandas.DataFrame"
]
]
|
jobrajac/ca-es | [
"ef9e4d0f06410e86eb380f049d047b224a216bfb"
]
| [
"src/pool.py"
]
| [
"import numpy as np\n\n\nclass CustomPool:\n \"\"\"Class for storing and providing samples of different stages of growth.\"\"\"\n def __init__(self, seed, size):\n self.size = size\n self.slots = np.repeat([seed], size, 0)\n self.seed = seed\n\n def commit(self, batch):\n \"\"\"Replace existing slots with a batch.\"\"\"\n indices = batch[\"indices\"]\n for i, x in enumerate(batch[\"x\"]):\n if (x[:, :, 3] > 0.1).any(): # Avoid committing dead image\n self.slots[indices[i]] = x.copy()\n\n def sample(self, c):\n \"\"\"Retrieve a batch from the pool.\"\"\"\n indices = np.random.choice(self.size, c, False)\n batch = {\n \"indices\": indices,\n \"x\": self.slots[indices]\n }\n return batch\n"
]
| [
[
"numpy.repeat",
"numpy.random.choice"
]
]
|
neale/bsuite | [
"0430d90bc2d74d604f5a2215bc524de4c614d97e"
]
| [
"bsuite/experiments/mnist_noise/mnist_noise_test.py"
]
| [
"# pylint: disable=g-bad-file-header\n# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for bsuite.experiments.mnist.\"\"\"\n\n# Import all required packages\n\nfrom absl.testing import absltest\nfrom bsuite.experiments.mnist_noise import mnist_noise\nfrom dm_env import test_utils\n\nimport numpy as np\n\n\nclass InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):\n\n def make_object_under_test(self):\n return mnist_noise.load(noise_scale=2.0, seed=101)\n\n def make_action_sequence(self):\n num_actions = self.environment.action_spec().num_values\n rng = np.random.RandomState(42)\n\n for _ in range(100):\n yield rng.randint(num_actions)\n\nif __name__ == '__main__':\n absltest.main()\n"
]
| [
[
"numpy.random.RandomState"
]
]
|
dgiambra/schrodinger | [
"ac1e283242d662f5bab151b46770f21f0ff0bbe0"
]
| [
"schrodinger/schrodinger.py"
]
| [
"def schrodinger(V_0 , c, n_basis_set, fxn, d_0 , d_1):\n '''\n This function solves Schrodingers wave equation in two dimensions.\n \n Parameters\n ----------\n V_0 : int\n Potential Energy Constant\n \n c : int\n constant\n \n n_basis_set : int\n number of basis sets to use\n \n d_0 : int\n lower bound of domain\n \n d_1 : int\n upperbound of domain\n \n Output\n ------\n energy : tuple\n first element is an array of eigenvalues\n second element is the eigen vector corresponding to the basis set coefficients\n \n \n '''\n import scipy.ndimage.filters as snf\n import numpy as np\n import scipy.integrate as integrate\n from numpy.polynomial import Polynomial, Legendre\n from numpy import polyder as deriv\n import numpy.linalg as linalg\n ## crs = open(file_name, \"r\")\n x = np.linspace(d_0,d_1,1000)\n ## fps = []\n ## for columns in (raw.strip().split() for raw in crs):\n ## fps.append(float(columns[2]))\n ## x.append(float(columns[1]))\n ## fxn_x = fxn(x)\n ai=[]\n h=np.zeros((n_basis_set,n_basis_set))\n\n ## for i in list(range((n_basis_set))):\n ## b.append(np.exp(1j*2*np.pi*i*x))\n ## ai.append(integrate.quad(lambda x:fxn(x)*np.exp(1j*2*np.pi*i*x),d_0,d_1 )[0])\n ## h.append([])\n for i in list(range((n_basis_set))):\n for z in list(range((n_basis_set))):\n h[i][z]=integrate.quad(lambda x:(float(-c*-i**2*np.pi**2*np.exp(1j*2*np.pi*i*x)+V_0*np.exp(1j*2*np.pi*i*x))*np.exp(1j*2*np.pi*z*x)),d_0,d_1 )[0]\n ## ai = np.matrix(ai)\n h = np.matrix(h)\n energy = linalg.eig(h)\n return energy\n"
]
| [
[
"numpy.matrix",
"numpy.zeros",
"numpy.exp",
"numpy.linalg.eig",
"numpy.linspace"
]
]
|
markf94/reference-qvm | [
"e4ca313928f72b3d2348a3f9abfec6607944c59e"
]
| [
"referenceqvm/tests/test_stabilizer_utils.py"
]
| [
"\"\"\"\nTest the infrastructure for building a state by projection onto the +1\neigenspace of a set of generators or stabilizers\n\"\"\"\nimport numpy as np\nfrom referenceqvm.stabilizer_utils import (compute_action, project_stabilized_state,\n binary_stabilizer_to_pauli_stabilizer,\n pauli_stabilizer_to_binary_stabilizer)\nfrom referenceqvm.tests.test_stabilizer_qvm import (five_qubit_code_generators,\n bell_stabilizer)\nfrom pyquil.paulis import sX, sZ, sY, sI, PauliSum\nimport pytest\n\n\ndef test_compute_action_type_checks():\n \"\"\"\n Make sure type checks are consistent and working\n \"\"\"\n with pytest.raises(TypeError):\n compute_action([0, 0, 0, 0, 0], PauliSum([sX(0)]), 5)\n\n with pytest.raises(TypeError):\n compute_action([0, 0, 0, 0, 0], sX(0), 4)\n\n with pytest.raises(TypeError):\n compute_action(3, 'a', 4)\n\n with pytest.raises(TypeError):\n compute_action(-3, sX(0), 4)\n\n with pytest.raises(TypeError):\n compute_action('0001', sX(0), 4)\n\n\ndef test_stabilizer_to_matrix_conversion():\n # bitflip code\n stabilizer_matrix = pauli_stabilizer_to_binary_stabilizer(bell_stabilizer)\n true_stabilizer_matrix = np.array([[0, 0, 1, 1, 0],\n [1, 1, 0, 0, 0]])\n assert np.allclose(true_stabilizer_matrix, stabilizer_matrix)\n\n test_stabilizer_list = binary_stabilizer_to_pauli_stabilizer(true_stabilizer_matrix)\n for idx, term in enumerate(test_stabilizer_list):\n assert term == bell_stabilizer[idx]\n\n # given some codes convert them to code matrices\n stabilizer_matrix = pauli_stabilizer_to_binary_stabilizer(five_qubit_code_generators)\n true_stabilizer_matrix = np.array([[1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0]])\n assert np.allclose(true_stabilizer_matrix, stabilizer_matrix)\n\n test_stabilizer_list = binary_stabilizer_to_pauli_stabilizer(true_stabilizer_matrix)\n for idx, term in enumerate(test_stabilizer_list):\n assert term == five_qubit_code_generators[idx]\n\n\ndef test_compute_action_identity():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(4):\n pauli_term = sI(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n assert new_basis_state == comp_basis_state\n assert np.isclose(coeff, 1)\n\n\ndef test_compute_action_X():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(4):\n pauli_term = sX(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(4):\n pauli_term = sX(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii] ^ 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n\ndef test_compute_action_XX():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(3):\n pauli_term = sX(ii) * sX(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = 
comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii + 1] = 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(3):\n pauli_term = sX(ii) * sX(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii] ^ 1\n true_basis_state[ii + 1] = true_basis_state[ii + 1] ^ 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n\ndef test_compute_action_Y():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(4):\n pauli_term = sY(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii] ^ 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1j)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(4):\n pauli_term = sY(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii] ^ 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, -1j)\n\n\ndef test_compute_action_YY():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(3):\n pauli_term = sY(ii) * sY(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii + 1] = 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, -1)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(3):\n pauli_term = sY(ii) * sY(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii] ^ 1\n true_basis_state[ii + 1] = true_basis_state[ii + 1] ^ 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, -1)\n\n\ndef test_compute_action_Z():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(4):\n pauli_term = sZ(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii]\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(4):\n pauli_term = sZ(ii)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] = true_basis_state[ii]\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, -1)\n\n\ndef test_compute_action_ZZ():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(3):\n pauli_term = sZ(ii) * sZ(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in 
python\n true_basis_state = comp_basis_state.copy()\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(3):\n pauli_term = sZ(ii) * sZ(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1)\n\n\ndef test_compute_action_XY():\n \"\"\"\n Action of Pauli operators on state\n \"\"\"\n comp_basis_state = [0, 0, 0, 0]\n for ii in range(3):\n pauli_term = sX(ii) * sY(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] ^= 1\n true_basis_state[ii + 1] ^= 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, 1j)\n\n comp_basis_state = [1, 1, 1, 1]\n for ii in range(3):\n pauli_term = sX(ii) * sY(ii + 1)\n new_basis_state, coeff = compute_action(comp_basis_state, pauli_term,\n len(comp_basis_state))\n # abuse of comparisons in python\n true_basis_state = comp_basis_state.copy()\n true_basis_state[ii] ^= 1\n true_basis_state[ii + 1] ^= 1\n assert new_basis_state == true_basis_state\n assert np.isclose(coeff, -1j)\n\n\ndef test_stabilizer_projection_Z():\n \"\"\"\n test if we project out the correct state\n \"\"\"\n stabilizer_state = project_stabilized_state([sZ(0)])\n true_state = np.zeros((2, 1))\n true_state[0, 0] = 1\n assert np.allclose(true_state, stabilizer_state.todense())\n\n\ndef test_stabilizer_projection_ZZ():\n \"\"\"\n test if we project out the correct state\n \"\"\"\n stabilizer_state = project_stabilized_state([sZ(0) * sZ(1), sX(0) * sX(1)])\n true_state = np.zeros((4, 1))\n true_state[0, 0] = true_state[3, 0] = 1\n true_state /= np.sqrt(2)\n assert np.allclose(true_state, stabilizer_state.todense())\n\n\ndef test_stabilizer_projection_ZZZ():\n \"\"\"\n test if we project out the correct state\n \"\"\"\n stabilizer_state = project_stabilized_state([sZ(0) * sZ(1), sZ(1) * sZ(2),\n sX(0) * sX(1) * sX(2)])\n true_state = np.zeros((8, 1))\n true_state[0, 0] = true_state[7, 0] = 1\n true_state /= np.sqrt(2)\n assert np.allclose(true_state, np.array(stabilizer_state.todense()))\n"
]
| [
[
"numpy.array",
"numpy.isclose",
"numpy.zeros",
"numpy.allclose",
"numpy.sqrt"
]
]
|
RobinAlgayres/beer | [
"15ad0dad5a49f98e658e948724e05df347ffe3b8"
]
| [
"beer/priors/gamma.py"
]
| [
"'''Implementation of the Gamma distribution.'''\n\nimport torch\nfrom .baseprior import ExpFamilyPrior\n\n\nclass GammaPrior(ExpFamilyPrior):\n '''Gamma distribution.\n\n parameters:\n a: shape\n b: rate\n\n natural parameters:\n eta1 = -b\n eta2 = a - 1\n\n sufficient statistics:\n T_1(x) = x\n T_2(x) = ln x\n\n '''\n __repr_str = '{classname}(shape={shape}, rate={rate})'\n\n def __init__(self, shape, rate):\n nparams = self.to_natural_parameters(shape, rate)\n super().__init__(nparams)\n\n def __repr__(self):\n shape, rate = self.to_std_parameters(self.natural_parameters)\n return self.__repr_str.format(\n classname=self.__class__.__name__,\n shape=repr(shape), rate=repr(rate)\n )\n\n def expected_value(self):\n shape, rate = self.to_std_parameters(self.natural_parameters)\n return shape / rate\n\n def to_natural_parameters(self, shape, rate):\n return torch.cat([-rate.view(1), (shape - 1).view(1)])\n\n def _to_std_parameters(self, natural_parameters):\n shape, rate = natural_parameters[1] + 1, -natural_parameters[0]\n return shape, rate\n\n def _expected_sufficient_statistics(self):\n shape, rate = self.to_std_parameters(self.natural_parameters)\n return torch.cat([(shape / rate).view(1),\n (torch.digamma(shape) - torch.log(rate)).view(1)])\n\n def _log_norm(self, natural_parameters=None):\n if natural_parameters is None:\n natural_parameters = self.natural_parameters\n shape, rate = self.to_std_parameters(natural_parameters)\n return torch.lgamma(shape) - shape * torch.log(rate)\n\n\n__all__ = ['GammaPrior']\n"
]
| [
[
"torch.digamma",
"torch.log",
"torch.lgamma"
]
]
|
Fillipedem/quora-insincere-questions | [
"1ad51be239f41958e7b1b51402f189079048dba1"
]
| [
"restapi/net.py"
]
| [
"import torch.nn as nn\nfrom transformers import DistilBertModel\n\n\nclass DistilBertModelClass(nn.Module):\n\n def __init__(self):\n super(DistilBertModelClass, self).__init__()\n self.distil_bert = DistilBertModel.from_pretrained(\"distilbert-base-uncased\")\n self.linear1 = nn.Linear(768, 2)\n self.sigmoid = nn.Sigmoid()\n \n def forward(self, ids, mask):\n bert_out = self.distil_bert(ids, mask)\n x = bert_out.last_hidden_state[:, -1, :] # get bert last hidden state\n x = self.linear1(x)\n x = self.sigmoid(x)\n return x"
]
| [
[
"torch.nn.Linear",
"torch.nn.Sigmoid"
]
]
|
MagicUmom/pattern_recognition_project | [
"f47b9d40baad1431ecdc5981bc1118fb228a34b0"
]
| [
"face_recognition.py"
]
| [
"\"\"\"\n===================================================\nFaces recognition example using eigenfaces and SVMs\n===================================================\n\nThe dataset used in this example is a preprocessed excerpt of the\n\"Labeled Faces in the Wild\", aka LFW_:\n\n http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)\n\n.. _LFW: http://vis-www.cs.umass.edu/lfw/\n\nExpected results for the top 5 most represented people in the dataset:\n\n================== ============ ======= ========== =======\n precision recall f1-score support\n================== ============ ======= ========== =======\n Ariel Sharon 0.67 0.92 0.77 13\n Colin Powell 0.75 0.78 0.76 60\n Donald Rumsfeld 0.78 0.67 0.72 27\n George W Bush 0.86 0.86 0.86 146\nGerhard Schroeder 0.76 0.76 0.76 25\n Hugo Chavez 0.67 0.67 0.67 15\n Tony Blair 0.81 0.69 0.75 36\n\n avg / total 0.80 0.80 0.80 322\n================== ============ ======= ========== =======\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom time import time\nimport logging\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.datasets import fetch_lfw_people\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n\n\nprint(__doc__)\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\n\n###############################################################################\n# Download the data, if not already on disk and load it as numpy arrays\n\nlfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)\n\n# introspect the images arrays to find the shapes (for plotting)\nn_samples, h, w = lfw_people.images.shape\n\n# for machine learning we use the 2 data directly (as relative pixel\n# positions info is ignored by this model)\nX = lfw_people.data\nn_features = X.shape[1]\n\n# the label to predict is the id of the person\ny = lfw_people.target\ntarget_names = lfw_people.target_names\nn_classes = target_names.shape[0]\n\nprint(\"Total dataset size:\")\nprint(\"n_samples: %d\" % n_samples)\nprint(\"n_features: %d\" % n_features)\nprint(\"n_classes: %d\" % n_classes)\n\n\n###############################################################################\n# Split into a training set and a test set using a stratified k fold\n\n# split into a training and testing set\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=42)\n\n\n###############################################################################\n# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled\n# dataset): unsupervised feature extraction / dimensionality reduction\nn_components = 150\n\nprint(\"Extracting the top %d eigenfaces from %d faces\"\n % (n_components, X_train.shape[0]))\nt0 = time()\npca = PCA(n_components=n_components, svd_solver='randomized',\n whiten=True).fit(X_train)\nprint(\"done in %0.3fs\" % (time() - t0))\n\neigenfaces = pca.components_.reshape((n_components, h, w))\n\nprint(\"Projecting the input data on the eigenfaces orthonormal basis\")\nt0 = time()\nX_train_pca = pca.transform(X_train)\nX_test_pca = pca.transform(X_test)\nprint(\"done in %0.3fs\" % (time() - t0))\n\n\n###############################################################################\n# Train a SVM classification model\n\nprint(\"Fitting the classifier to the training 
set\")\nt0 = time()\nparam_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],\n 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }\nclf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)\nclf = clf.fit(X_train_pca, y_train)\nprint(\"done in %0.3fs\" % (time() - t0))\nprint(\"Best estimator found by grid search:\")\nprint(clf.best_estimator_)\n\n\n###############################################################################\n# Quantitative evaluation of the model quality on the test set\n\nprint(\"Predicting people's names on the test set\")\nt0 = time()\ny_pred = clf.predict(X_test_pca)\nprint(\"done in %0.3fs\" % (time() - t0))\n\nprint(classification_report(y_test, y_pred, target_names=target_names))\nprint(confusion_matrix(y_test, y_pred, labels=range(n_classes)))\n\n\n###############################################################################\n# Qualitative evaluation of the predictions using matplotlib\n\ndef plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n \"\"\"Helper function to plot a gallery of portraits\"\"\"\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n\n\n# plot the result of the prediction on a portion of the test set\n\ndef title(y_pred, y_test, target_names, i):\n pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]\n true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]\n return 'predicted: %s\\ntrue: %s' % (pred_name, true_name)\n\nprediction_titles = [title(y_pred, y_test, target_names, i)\n for i in range(y_pred.shape[0])]\n\nplot_gallery(X_test, prediction_titles, h, w)\n\n# plot the gallery of the most significative eigenfaces\n\neigenface_titles = [\"eigenface %d\" % i for i in range(eigenfaces.shape[0])]\nplot_gallery(eigenfaces, eigenface_titles, h, w)\n\nplt.show()\n"
]
| [
[
"sklearn.datasets.fetch_lfw_people",
"matplotlib.pyplot.title",
"sklearn.decomposition.PCA",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplot"
]
]
|
hartvjir/detectron2 | [
"3d6cf5f1212ef432358db751e761491d7f60b6d5"
]
| [
"ipalm/patch_architecture.py"
]
| [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\nimport random\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport torchvision\nfrom torchvision import transforms\n\nfrom net.mobilenetv3 import MobileNetV3Large, MobileNetV3Small\n# from trainer import Trainer\n# import utils\n\n\n# class MaterialPatchNet(MobileNetV3Large):\n# def __init__(self):\n# super().__init__()\n\n\nif __name__ == \"__main__\":\n lrn = nn.LocalResponseNorm(2)\n signal_2d = torch.randn(32, 5, 24, 24)\n signal_4d = torch.randn(16, 5, 7, 7, 7, 7)\n output_2d = lrn(signal_2d)\n output_4d = lrn(signal_4d)"
]
| [
[
"torch.nn.LocalResponseNorm",
"torch.randn"
]
]
|
oskopek/cil | [
"4c1fd464b5af52aff7a0509f56e21a2671fb8ce8"
]
| [
"cil/data/datasets.py"
]
| [
"import numpy as np\n\nfrom .twitter_dataset import TwitterDataset\n\n\nclass Datasets:\n X_train = None\n X_train_word = None\n y_train = None\n X_eval = None\n X_eval_word = None\n y_eval = None\n X_test = None\n X_test_word = None\n\n word_vocab = None\n inv_word_vocab = None\n\n train = None\n eval = None\n test = None\n\n def __init__(self,\n train_file,\n eval_file,\n test_file,\n preprocessing,\n random_state=42,\n vocab_size=20000,\n padding_size=40):\n self.train_file = train_file\n self.eval_file = eval_file\n self.test_file = test_file\n self.random_state = random_state\n self.preprocessing = preprocessing\n self.vocab_size = vocab_size\n self.padding_size = padding_size\n\n @staticmethod\n def _read_lines(filename, quote='\"'):\n with open(filename, \"r\") as f:\n X = []\n y = []\n for line in f:\n label, line = line.split(',', maxsplit=1)\n X.append(line.rstrip())\n y.append(int(label))\n return X, y\n\n def load(self):\n print(\"Loading data from disk...\")\n X_train, y_train = Datasets._read_lines(self.train_file)\n X_eval, y_eval = Datasets._read_lines(self.eval_file)\n X_test, _ = Datasets._read_lines(self.test_file, quote=None)\n print(X_train[0], y_train[0])\n print(X_eval[0], y_eval[0])\n print(X_test[0]) # TODO: Debug\n\n print(\"Preprocessing...\")\n X_train, y_train = self.preprocessing.transform(X_train, labels=y_train)\n X_eval, y_eval = self.preprocessing.transform(X_eval, labels=y_eval)\n X_test, _ = self.preprocessing.transform(X_test, labels=None)\n\n print(\"Generating vocabulary...\")\n word_vocab, inv_word_vocab = self.preprocessing.vocab(\n X_train, vocab_downsize=self.vocab_size)\n\n self.X_train = X_train\n self.y_train = y_train\n\n self.X_eval = X_eval\n self.y_eval = y_eval\n\n self.X_test = X_test\n\n self.word_vocab = word_vocab\n self.inv_word_vocab = inv_word_vocab\n\n print(\"Generating TF data...\")\n self.train = TwitterDataset(\n X_train, y_train, word_vocab=self.word_vocab, padding_size=self.padding_size)\n self.eval = TwitterDataset(X_eval, y_eval, train=self.train, padding_size=self.padding_size)\n self.test = TwitterDataset(X_test, None, train=self.train, padding_size=self.padding_size)\n\n def batches_per_epoch_generator(self, batch_size, data=None, shuffle=True):\n if data is None:\n data = self.X_train_word\n\n n_rows = data.shape[0]\n if shuffle:\n train_permutation = np.random.permutation(n_rows)\n else:\n train_permutation = np.arange(n_rows)\n\n for i in range(0, n_rows, batch_size):\n batch = data[train_permutation[i:i + batch_size]]\n if len(batch) == 0:\n raise StopIteration\n else:\n yield batch\n"
]
| [
[
"numpy.random.permutation",
"numpy.arange"
]
]
|
JayRGopal/CRAFT-pytorch | [
"a4444ef26c44f16c6fd9054dbf6b74e5a161acfe"
]
| [
"file_utils.py"
]
| [
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport imgproc\n\n# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py\ndef get_files(img_dir):\n imgs, masks, xmls = list_files(img_dir)\n return imgs, masks, xmls\n\ndef list_files(in_path):\n img_files = []\n mask_files = []\n gt_files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n for file in filenames:\n filename, ext = os.path.splitext(file)\n ext = str.lower(ext)\n if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':\n img_files.append(os.path.join(dirpath, file))\n elif ext == '.bmp':\n mask_files.append(os.path.join(dirpath, file))\n elif ext == '.xml' or ext == '.gt' or ext == '.txt':\n gt_files.append(os.path.join(dirpath, file))\n elif ext == '.zip':\n continue\n # img_files.sort()\n # mask_files.sort()\n # gt_files.sort()\n return img_files, mask_files, gt_files\n\ndef saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):\n \"\"\" save text detection result one by one\n Args:\n img_file (str): image file name\n img (array): raw image context\n boxes (array): array of result file\n Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output\n Return:\n None\n \"\"\"\n img = np.array(img)\n\n # make result file list\n filename, file_ext = os.path.splitext(os.path.basename(img_file))\n\n # result directory\n res_file = dirname + \"res_\" + filename + '.txt'\n res_img_file = dirname + \"res_\" + filename + '.jpg'\n\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n\n with open(res_file, 'w') as f:\n for i, box in enumerate(boxes):\n poly = np.array(box).astype(np.int32).reshape((-1))\n strResult = ','.join([str(p) for p in poly]) + '\\r\\n'\n f.write(strResult)\n\n poly = poly.reshape(-1, 2)\n \n ''' \n Experimental fill line replaced the line below:\n cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)\n '''\n cv2.fillPoly(img, [poly.reshape((-1, 1, 2))], color=(0, 0, 255))\n \n \n ptColor = (0, 255, 255)\n if verticals is not None:\n if verticals[i]:\n ptColor = (255, 0, 0)\n\n if texts is not None:\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.5\n cv2.putText(img, \"{}\".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)\n cv2.putText(img, \"{}\".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)\n\n # Save result image\n cv2.imwrite(res_img_file, img)\n\n"
]
| [
[
"numpy.array"
]
]
|
choobea/quickdraw_mlp | [
"267f9a6225d69c8fbb086b7814066e91fb56b370"
]
| [
"quickdraw_multitask_network_trainer.py"
]
| [
"import argparse\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\nfrom data_providers import QuickDrawCombinedDataProvider\nfrom network_builder import MultutaskNetworkGraph\nfrom utils.parser_utils import ParserClass\nfrom utils.storage import build_experiment_folder, save_statistics\n\ntf.reset_default_graph() # resets any previous graphs to clear memory\nparser = argparse.ArgumentParser(description='Welcome to CNN experiments script') # generates an argument parser\nparser_extractor = ParserClass(parser=parser) # creates a parser class to process the parsed input\n\nbatch_size, seed, epochs, logs_path, continue_from_epoch, tensorboard_enable, batch_norm, \\\nstrided_dim_reduction, experiment_prefix, dropout_rate_value, rnn_dropout_rate_value, layer_stage_sizes, \\\nrnn_cell_type, bidirectional, rnn_stage_sizes, conv_rnn_sizes, num_classes_use, inner_layer_depth, \\\nfilter_size, num_dense_layers, num_dense_units, network_name, rotate = parser_extractor.get_argument_variables()\n\n# returns a list of objects that contain\n# our parsed input\n\nrng = np.random.RandomState(seed=seed) # set seed\n\nconvnet_desc = \"\"\nif batch_norm:\n convnet_desc = convnet_desc + \"BN\"\n\nfor ls in layer_stage_sizes:\n convnet_desc = \"{}_{}\".format(convnet_desc, ls)\n\nif bidirectional:\n if len(conv_rnn_sizes) == 0:\n experiment_name = \"exp{}_{}_{}_layers_bidirectional_{}_{}_dense({})\".format(experiment_prefix, rnn_cell_type,\n len(rnn_stage_sizes), dropout_rate_value, num_dense_layers, num_dense_units)\n else:\n experiment_name = \"exp{}_{}_{}_layers_{}_rnnconv_bidirectional_{}_dropout_{}_dense({})\".format(experiment_prefix, rnn_cell_type,\n len(rnn_stage_sizes), len(conv_rnn_sizes), dropout_rate_value, num_dense_layers,num_dense_units)\n\nif bidirectional:\n if len(conv_rnn_sizes) == 0:\n experiment_name = \"exp{}_{}_{}_rnnlayers_bidirectional_{}_dropout_{}_{}_convlayers_{}_filter_{}_dense({})\".format(experiment_prefix, rnn_cell_type,\n len(rnn_stage_sizes), dropout_rate_value, network_name, convnet_desc, max(filter_size), num_dense_layers, num_dense_units)\n else:\n experiment_name = \"exp{}_{}_{}_rnnlayers_{}_rnnconv_bidirectional_{}_dropout_{}_{}_convlayers_{}_filter_{}_dense({})\".format(experiment_prefix, rnn_cell_type,\n len(rnn_stage_sizes), len(conv_rnn_sizes), dropout_rate_value, network_name, convnet_desc, max(filter_size), num_dense_layers, num_dense_units)\nelse:\n experiment_name = \"exp{}_{}_{}_rnnlayers_{}_dropout_{}_{}_convlayers_{}_filter_{}_dense({})\".format(experiment_prefix, rnn_cell_type,\n len(rnn_stage_sizes), dropout_rate_value, network_name, convnet_desc, max(filter_size), num_dense_layers, num_dense_units)\n\nnetwork_name = \"MultitaskClassifier\"\n\ntrain_data = QuickDrawCombinedDataProvider(which_set=\"train\", batch_size=batch_size, rng=rng, num_classes_use=num_classes_use)\nval_data = QuickDrawCombinedDataProvider(which_set=\"valid\", batch_size=batch_size, rng=rng, num_classes_use=num_classes_use)\ntest_data = QuickDrawCombinedDataProvider(which_set=\"test\", batch_size=batch_size, rng=rng, num_classes_use=num_classes_use)\n# setup our data providers\n\nprint(\"Running {}\".format(experiment_name))\nprint(\"Starting from epoch {}\".format(continue_from_epoch))\n\nsaved_models_filepath, logs_filepath = build_experiment_folder(experiment_name, logs_path) # generate experiment dir\n\n# Placeholder setup\ndata_inputs_seq = tf.placeholder(tf.float32, [batch_size, train_data.inputs[0][0].shape[0], train_data.inputs[0][0].shape[1]], 
'data-inputs-seq')\ndata_inputs_im = tf.placeholder(tf.float32, [batch_size, train_data.inputs[0][1].shape[0], train_data.inputs[0][1].shape[1], train_data.inputs[0][1].shape[2]], 'data-inputs-im')\ndata_targets = tf.placeholder(tf.int32, [batch_size], 'data-targets')\n\ntraining_phase = tf.placeholder(tf.bool, name='training-flag')\nrotate_data = tf.placeholder(tf.bool, name='rotate-flag')\ndropout_rate = tf.placeholder(tf.float32, name='dropout-prob')\nrnn_dropout = tf.placeholder(tf.float32, name='rnn-dropout-prob')\n\nclassifier_network = MultutaskNetworkGraph(input_x=data_inputs_im, target_placeholder=data_targets,\n dropout_rate=dropout_rate, rnn_dropout=rnn_dropout, batch_size=batch_size,\n num_channels=1, n_classes=train_data.num_classes,\n is_training=training_phase, augment_rotate_flag=rotate_data,\n strided_dim_reduction=strided_dim_reduction,\n use_batch_normalization=batch_norm,\n network_name=network_name, layer_stage_sizes=layer_stage_sizes,\n inner_layer_depth=inner_layer_depth, filter_size=filter_size,\n rnn_cell_type=rnn_cell_type,\n input_seq=data_inputs_seq,\n bidirectional=bidirectional,\n rnn_stage_sizes=rnn_stage_sizes,\n conv_rnn_sizes=conv_rnn_sizes,\n num_dense_layers=num_dense_layers\n ) # initialize our computational graph\n\nif continue_from_epoch == -1: # if this is a new experiment and not continuation of a previous one then generate a new\n # statistics file\n save_statistics(logs_filepath, \"result_summary_statistics\", [\"epoch\", \"train_c_image_loss\",\n \"train_image_accuracy\",\n \"train_c_stroke_loss\",\n \"train_stroke_accuracy\",\n \"train_c_comb_loss\",\n \"train_comb_accuracy\",\n \"val_c_image_loss\",\n \"val_image_accuracy\",\n \"val_c_stroke_loss\",\n \"val_stroke_accuracy\",\n \"val_c_comb_loss\",\n \"val_comb_accuracy\",\n \"test_c_image_loss\",\n \"test_image_accuracy\",\n \"test_c_stroke_loss\",\n \"test_stroke_accuracy\",\n \"test_c_comb_loss\",\n \"test_comb_accuracy\"], create=True)\n\nstart_epoch = continue_from_epoch if continue_from_epoch != -1 else 0 # if new experiment start from 0 otherwise\n# continue where left off\n\nsummary_op, losses_ops, c_error_opt_op = classifier_network.init_train() # get graph operations (ops)\n\ntotal_train_batches = train_data.num_batches\ntotal_val_batches = val_data.num_batches\ntotal_test_batches = test_data.num_batches\n\nbest_epoch = 0\n\nif tensorboard_enable:\n print(\"saved tensorboard file at\", logs_filepath)\n writer = tf.summary.FileWriter(logs_filepath, graph=tf.get_default_graph())\n\ninit = tf.global_variables_initializer() # initialization op for the graph\n\n\ndef split_x_batch(x_batch):\n \"\"\"This is a helper function that splits the x_batch into batch of sequences\n and images.\n : param x_batch: input batch of shape [batch_size, ]. 
In each sub element\n we have a list of two items - sequence and image representation.\n\n This function takes the representations and splits them.\n \"\"\"\n seq_inputs = np.empty((0, 70, 3))\n im_inputs = np.empty((0, 28, 28, 1))\n\n for ii in range(x_batch.shape[0]):\n seq_inputs = np.append(seq_inputs, [x_batch[ii][0]], axis=0)\n im_inputs = np.append(im_inputs, [x_batch[ii][1]], axis=0)\n\n return seq_inputs, im_inputs\n\n\nwith tf.Session() as sess:\n sess.run(init) # actually running the initialization op\n train_saver = tf.train.Saver() # saver object that will save our graph so we can reload it later for continuation of\n val_saver = tf.train.Saver()\n # training or inference\n\n if continue_from_epoch != -1:\n train_saver.restore(sess, \"{}/{}_{}.ckpt\".format(saved_models_filepath, experiment_name,\n continue_from_epoch-1)) # restore previous graph to continue operations\n\n best_val_accuracy = 0.\n with tqdm.tqdm(total=epochs - start_epoch) as epoch_pbar:\n for e in range(start_epoch, epochs):\n total_c_image_loss = 0.\n total_image_accuracy = 0.\n total_c_stroke_loss = 0.\n total_stroke_accuracy = 0.\n total_c_comb_loss = 0.\n total_comb_accuracy = 0.\n with tqdm.tqdm(total=total_train_batches) as pbar_train:\n for batch_idx, (x_batch, y_batch) in enumerate(train_data):\n # split x_batch into x_batch_im and x_batch_seq\n x_batch_seq, x_batch_im = split_x_batch(x_batch)\n iter_id = e * total_train_batches + batch_idx\n _, c_loss_value = sess.run(\n [c_error_opt_op, losses_ops],\n feed_dict={dropout_rate: dropout_rate_value, rnn_dropout: rnn_dropout_rate_value,\n data_inputs_seq: x_batch_seq, data_inputs_im: x_batch_im,\n data_targets: y_batch, training_phase: True, rotate_data: rotate})\n # Here we execute the c_error_opt_op which trains the network and also the ops that compute the\n # loss and accuracy, we save those in _, c_loss_value and acc respectively.\n total_c_image_loss += c_loss_value[\"crossentropy_image_losses\"] # add loss of current iter to sum\n total_image_accuracy += c_loss_value[\"accuracy_image\"]\n total_c_stroke_loss += c_loss_value[\"crossentropy_stroke_losses\"] # add loss of current iter to sum\n total_stroke_accuracy += c_loss_value[\"accuracy_stroke\"]\n total_c_comb_loss += c_loss_value[\"crossentropy_comb_losses\"] # add loss of current iter to sum\n total_comb_accuracy += c_loss_value[\"accuracy_comb\"]\n\n iter_out = \"iter_num: {}, train_comb_loss: {}, train_comb_accuracy: {}\".format(iter_id,\n total_c_comb_loss / (batch_idx + 1),\n total_comb_accuracy / (\n batch_idx + 1)) # show\n # iter statistics using running averages of previous iter within this epoch\n pbar_train.set_description(iter_out)\n pbar_train.update(1)\n if tensorboard_enable and batch_idx % 25 == 0: # save tensorboard summary every 25 iterations\n _summary = sess.run(\n summary_op,\n feed_dict={dropout_rate: dropout_rate_value, rnn_dropout: rnn_dropout_rate_value,\n data_inputs_seq: x_batch_seq, data_inputs_im: x_batch_im,\n data_targets: y_batch, training_phase: True, rotate_data: rotate})\n writer.add_summary(_summary, global_step=iter_id)\n\n total_c_image_loss /= total_train_batches\n total_image_accuracy /= total_train_batches\n total_c_stroke_loss /= total_train_batches\n total_stroke_accuracy /= total_train_batches\n total_c_comb_loss /= total_train_batches\n total_comb_accuracy /= total_train_batches\n # compute mean of loss\n # compute mean of accuracy\n\n save_path = train_saver.save(sess, \"{}/{}_{}.ckpt\".format(saved_models_filepath, experiment_name, e))\n # save 
graph and weights\n print(\"Saved current model at\", save_path)\n\n total_val_c_image_loss = 0.\n total_val_image_accuracy = 0.\n total_val_c_stroke_loss = 0.\n total_val_stroke_accuracy = 0.\n total_val_c_comb_loss = 0.\n total_val_comb_accuracy = 0. # run validation stage, note how training_phase placeholder is set to False\n # and that we do not run the c_error_opt_op which runs gradient descent, but instead only call the loss ops\n # to collect losses on the validation set\n with tqdm.tqdm(total=total_val_batches) as pbar_val:\n for batch_idx, (x_batch, y_batch) in enumerate(val_data):\n # split x_batch into x_batch_im and x_batch_seq\n x_batch_seq, x_batch_im = split_x_batch(x_batch)\n c_loss_value = sess.run(\n [losses_ops],\n feed_dict={dropout_rate: dropout_rate_value, rnn_dropout: rnn_dropout_rate_value,\n data_inputs_seq: x_batch_seq, data_inputs_im: x_batch_im,\n data_targets: y_batch, training_phase: False, rotate_data: False})\n\n total_val_c_image_loss += c_loss_value[0][\"crossentropy_image_losses\"]\n total_val_image_accuracy += c_loss_value[0][\"accuracy_image\"]\n total_val_c_stroke_loss += c_loss_value[0][\"crossentropy_stroke_losses\"]\n total_val_stroke_accuracy += c_loss_value[0][\"accuracy_stroke\"]\n total_val_c_comb_loss += c_loss_value[0][\"crossentropy_comb_losses\"]\n total_val_comb_accuracy += c_loss_value[0][\"accuracy_comb\"]\n\n iter_out = \"val_comb_loss: {}, val_comb_accuracy: {}\".format(total_c_comb_loss / (batch_idx + 1),\n total_val_comb_accuracy / (batch_idx + 1))\n pbar_val.set_description(iter_out)\n pbar_val.update(1)\n\n total_val_c_image_loss /= total_val_batches\n total_val_image_accuracy /= total_val_batches\n total_val_c_stroke_loss /= total_val_batches\n total_val_stroke_accuracy /= total_val_batches\n total_val_c_comb_loss /= total_val_batches\n total_val_comb_accuracy /= total_val_batches\n\n if best_val_accuracy < total_val_comb_accuracy: # check if val acc better than the previous best and if\n # so save current as best and save the model as the best validation model to be used on the test set\n # after the final epoch\n best_val_accuracy = total_val_comb_accuracy\n best_epoch = e\n save_path = val_saver.save(sess, \"{}/best_validation_{}_{}.ckpt\".format(saved_models_filepath, experiment_name, e))\n print(\"Saved best validation score model at\", save_path)\n\n epoch_pbar.update(1)\n # save statistics of this epoch, train and val without test set performance\n save_statistics(logs_filepath, \"result_summary_statistics\",\n [e,\n total_c_image_loss,\n total_image_accuracy,\n total_c_stroke_loss,\n total_stroke_accuracy,\n total_c_comb_loss,\n total_comb_accuracy,\n total_val_c_image_loss,\n total_val_image_accuracy,\n total_val_c_stroke_loss,\n total_val_stroke_accuracy,\n total_val_c_comb_loss,\n total_val_comb_accuracy,\n -1, -1, -1, -1, -1, -1])\n\n val_saver.restore(sess, \"{}/best_validation_{}_{}.ckpt\".format(saved_models_filepath, experiment_name, best_epoch))\n # restore model with best performance on validation set\n total_test_c_image_loss = 0.\n total_test_image_accuracy = 0.\n total_test_c_stroke_loss = 0.\n total_test_stroke_accuracy = 0.\n total_test_c_comb_loss = 0.\n total_test_comb_accuracy = 0.\n # computer test loss and accuracy and save\n with tqdm.tqdm(total=total_test_batches) as pbar_test:\n for batch_id, (x_batch, y_batch) in enumerate(test_data):\n # split x_batch into x_batch_im and x_batch_seq\n x_batch_seq, x_batch_im = split_x_batch(x_batch)\n c_loss_value = sess.run(\n [losses_ops],\n 
feed_dict={dropout_rate: dropout_rate_value, rnn_dropout: rnn_dropout_rate_value,\n data_inputs_seq: x_batch_seq, data_inputs_im: x_batch_im,\n data_targets: y_batch, training_phase: False, rotate_data: False})\n\n total_test_c_image_loss += c_loss_value[0][\"crossentropy_image_losses\"]\n total_test_image_accuracy += c_loss_value[0][\"accuracy_image\"]\n total_test_c_stroke_loss += c_loss_value[0][\"crossentropy_stroke_losses\"]\n total_test_stroke_accuracy += c_loss_value[0][\"accuracy_stroke\"]\n total_test_c_comb_loss += c_loss_value[0][\"crossentropy_comb_losses\"]\n total_test_comb_accuracy += c_loss_value[0][\"accuracy_comb\"]\n\n iter_out = \"test_comb_loss: {}, test_com_accuracy: {}\".format(total_test_c_comb_loss / (batch_idx + 1),\n total_test_comb_accuracy / (batch_idx + 1))\n pbar_test.set_description(iter_out)\n pbar_test.update(1)\n\n total_test_c_image_loss /= total_test_batches\n total_test_image_accuracy /= total_test_batches\n total_test_c_stroke_loss /= total_test_batches\n total_test_stroke_accuracy /= total_test_batches\n total_test_c_comb_loss /= total_test_batches\n total_test_comb_accuracy /= total_test_batches\n\n save_statistics(logs_filepath, \"result_summary_statistics\",\n [\"test set performance\", -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n total_test_c_image_loss,\n total_test_image_accuracy,\n total_test_c_stroke_loss,\n total_test_stroke_accuracy,\n total_test_c_comb_loss,\n total_test_comb_accuracy])\n"
]
| [
[
"numpy.empty",
"numpy.random.RandomState",
"tensorflow.get_default_graph",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"numpy.append",
"tensorflow.global_variables_initializer"
]
]
|
jhamman/xarray-test-docs | [
"c54123772817875678ec7ad769e6d4d6612aeb92"
]
| [
"xarray/core/common.py"
]
| [
"from __future__ import annotations\n\nimport warnings\nfrom contextlib import suppress\nfrom html import escape\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Iterable,\n Iterator,\n Mapping,\n TypeVar,\n overload,\n)\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, formatting, formatting_html, ops\nfrom .npcompat import DTypeLike\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import is_duck_dask_array\nfrom .rolling_exp import RollingExp\nfrom .utils import Frozen, either_dict_or_kwargs, is_scalar\n\ntry:\n import cftime\nexcept ImportError:\n cftime = None\n\n# Used as a sentinel value to indicate a all dimensions\nALL_DIMS = ...\n\n\nif TYPE_CHECKING:\n from .dataarray import DataArray\n from .dataset import Dataset\n from .types import T_DataWithCoords, T_Xarray\n from .variable import Variable\n from .weighted import Weighted\n\n\nC = TypeVar(\"C\")\nT = TypeVar(\"T\")\n\n\nclass ImplementsArrayReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):\n return self.reduce(func, dim, axis, skipna=skipna, **kwargs)\n\n else:\n\n def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore[misc]\n return self.reduce(func, dim, axis, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `{name}` is calculated over axes.\"\"\"\n )\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n )\n\n\nclass ImplementsDatasetReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func, dim, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore[misc]\n return self.reduce(func, dim, numeric_only=numeric_only, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`. By default `{name}` is\n applied over all dimensions.\n \"\"\"\n ).strip()\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. 
Only one of the 'dim'\n and 'axis' arguments can be supplied.\n \"\"\"\n ).strip()\n\n\nclass AbstractArray:\n \"\"\"Shared base class for DataArray and Variable.\"\"\"\n\n __slots__ = ()\n\n def __bool__(self: Any) -> bool:\n return bool(self.values)\n\n def __float__(self: Any) -> float:\n return float(self.values)\n\n def __int__(self: Any) -> int:\n return int(self.values)\n\n def __complex__(self: Any) -> complex:\n return complex(self.values)\n\n def __array__(self: Any, dtype: DTypeLike = None) -> np.ndarray:\n return np.asarray(self.values, dtype=dtype)\n\n def __repr__(self) -> str:\n return formatting.array_repr(self)\n\n def _repr_html_(self):\n if OPTIONS[\"display_style\"] == \"text\":\n return f\"<pre>{escape(repr(self))}</pre>\"\n return formatting_html.array_repr(self)\n\n def _iter(self: Any) -> Iterator[Any]:\n for n in range(len(self)):\n yield self[n]\n\n def __iter__(self: Any) -> Iterator[Any]:\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n return self._iter()\n\n def get_axis_num(self, dim: Hashable | Iterable[Hashable]) -> int | tuple[int, ...]:\n \"\"\"Return axis number(s) corresponding to dimension(s) in this array.\n\n Parameters\n ----------\n dim : str or iterable of str\n Dimension name(s) for which to lookup axes.\n\n Returns\n -------\n int or tuple of int\n Axis number or numbers corresponding to the given dimensions.\n \"\"\"\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n return tuple(self._get_axis_num(d) for d in dim)\n else:\n return self._get_axis_num(dim)\n\n def _get_axis_num(self: Any, dim: Hashable) -> int:\n try:\n return self.dims.index(dim)\n except ValueError:\n raise ValueError(f\"{dim!r} not found in array dimensions {self.dims!r}\")\n\n @property\n def sizes(self: Any) -> Mapping[Hashable, int]:\n \"\"\"Ordered mapping from dimension names to lengths.\n\n Immutable.\n\n See Also\n --------\n Dataset.sizes\n \"\"\"\n return Frozen(dict(zip(self.dims, self.shape)))\n\n\nclass AttrAccessMixin:\n \"\"\"Mixin class that allows getting keys with attribute access\"\"\"\n\n __slots__ = ()\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. 
If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(f\"{cls.__name__} must explicitly define __slots__\")\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n f\"xarray subclass {cls.__name__} should explicitly define __slots__\",\n FutureWarning,\n stacklevel=2,\n )\n super().__init_subclass__(**kwargs)\n\n @property\n def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for attribute-style access\"\"\"\n yield from ()\n\n @property\n def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for key-autocompletion\"\"\"\n yield from ()\n\n def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n f\"{type(self).__name__!r} object has no attribute {name!r}\"\n )\n\n # This complicated two-method design boosts overall performance of simple operations\n # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by\n # a whopping 8% compared to a single method that checks hasattr(self, \"__dict__\") at\n # runtime before every single assignment. All of this is just temporary until the\n # FutureWarning can be changed into a hard crash.\n def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n f\"Setting attribute {name!r} on a {type(self).__name__!r} object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\",\n FutureWarning,\n stacklevel=2,\n )\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"{!r} object has no attribute {!r}\".format(\n type(self).__name__, name\n ):\n raise\n raise AttributeError(\n f\"cannot set attribute {name!r} on a {type(self).__name__!r} object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n ) from e\n\n def __dir__(self) -> list[str]:\n \"\"\"Provide method name lookup and completion. 
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = {\n item\n for source in self._attr_sources\n for item in source\n if isinstance(item, str)\n }\n return sorted(set(dir(type(self))) | extra_attrs)\n\n def _ipython_key_completions_(self) -> list[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n items = {\n item\n for source in self._item_sources\n for item in source\n if isinstance(item, str)\n }\n return list(items)\n\n\ndef get_squeeze_dims(\n xarray_obj,\n dim: Hashable | Iterable[Hashable] | None = None,\n axis: int | Iterable[int] | None = None,\n) -> list[Hashable]:\n \"\"\"Get a list of dimensions to squeeze out.\"\"\"\n if dim is not None and axis is not None:\n raise ValueError(\"cannot use both parameters `axis` and `dim`\")\n if dim is None and axis is None:\n return [d for d, s in xarray_obj.sizes.items() if s == 1]\n\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n dim = list(dim)\n elif dim is not None:\n dim = [dim]\n else:\n assert axis is not None\n if isinstance(axis, int):\n axis = [axis]\n axis = list(axis)\n if any(not isinstance(a, int) for a in axis):\n raise TypeError(\"parameter `axis` must be int or iterable of int.\")\n alldims = list(xarray_obj.sizes.keys())\n dim = [alldims[a] for a in axis]\n\n if any(xarray_obj.sizes[k] > 1 for k in dim):\n raise ValueError(\n \"cannot select a dimension to squeeze out \"\n \"which has length greater than one\"\n )\n return dim\n\n\nclass DataWithCoords(AttrAccessMixin):\n \"\"\"Shared base class for Dataset and DataArray.\"\"\"\n\n _close: Callable[[], None] | None\n\n __slots__ = (\"_close\",)\n\n def squeeze(\n self,\n dim: Hashable | Iterable[Hashable] | None = None,\n drop: bool = False,\n axis: int | Iterable[int] | None = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})\n\n def clip(self, min=None, max=None, *, keep_attrs: bool = None):\n \"\"\"\n Return an array whose values are limited to ``[min, max]``.\n At least one of max or min must be given.\n\n Refer to `numpy.clip` for full documentation.\n\n See Also\n --------\n numpy.clip : equivalent function\n \"\"\"\n from .computation import apply_ufunc\n\n if keep_attrs is None:\n # When this was a unary func, the default was True, so retaining the\n # default.\n keep_attrs = _get_keep_attrs(default=True)\n\n return apply_ufunc(\n np.clip, self, min, max, keep_attrs=keep_attrs, dask=\"allowed\"\n )\n\n def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.xindexes[key].to_pandas_index()\n except KeyError:\n return pd.Index(range(self.sizes[key]), name=key)\n\n def _calc_assign_results(\n self: C, kwargs: Mapping[Any, T | Callable[[C], T]]\n ) -> dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict where the keys are the names of the coordinates\n with the new values to assign. If the values are callable, they are\n computed on this object and assigned to new coordinate variables.\n If the values are not callable, (e.g. a ``DataArray``, scalar, or\n array), they are simply assigned. A new coordinate can also be\n defined and attached to an existing dimension using a tuple with\n the first element the dimension name and the second element the\n values for this new coordinate.\n **coords_kwargs : optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(\n ... np.random.rand(4),\n ... coords=[np.array([358, 359, 0, 1])],\n ... dims=\"lon\",\n ... 
)\n >>> da\n <xarray.DataArray (lon: 4)>\n array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n <xarray.DataArray (lon: 4)>\n array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({\"lon\": (((da.lon + 180) % 360) - 180)})\n <xarray.DataArray (lon: 4)>\n array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n New coordinate can also be attached to an existing dimension:\n\n >>> lon_2 = np.array([300, 289, 0, 1])\n >>> da.assign_coords(lon_2=(\"lon\", lon_2))\n <xarray.DataArray (lon: 4)>\n array([0.5488135 , 0.71518937, 0.60276338, 0.54488318])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n lon_2 (lon) int64 300 289 0 1\n\n Note that the same result can also be obtained with a dict e.g.\n\n >>> _ = da.assign_coords({\"lon_2\": (\"lon\", lon_2)})\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments\n may not be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See Also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data\n\n def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``.\n\n Parameters\n ----------\n *args\n positional arguments passed into ``attrs.update``.\n **kwargs\n keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See Also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out\n\n def pipe(\n self,\n func: Callable[..., T] | tuple[Callable[..., T], str],\n *args,\n **kwargs,\n ) -> T:\n \"\"\"\n Apply ``func(self, *args, **kwargs)``\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : callable\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n *args\n positional arguments passed into ``func``.\n **kwargs\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : Any\n the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n .. code:: python\n\n f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n .. code:: python\n\n (ds.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c))\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n .. 
code:: python\n\n (ds.pipe(h).pipe(g, arg1=a).pipe((f, \"arg2\"), arg1=a, arg3=c))\n\n Examples\n --------\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": (\n ... (\"lat\", \"lon\"),\n ... 20 * np.random.rand(4).reshape(2, 2),\n ... ),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 12.98 16.3 14.06 12.9\n precipitation (lat, lon) float64 2.424 2.646 2.438 2.892\n\n >>> x.pipe(adder, arg=2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 12.98 16.3 14.06 12.9\n precipitation (lat, lon) float64 2.424 2.646 2.438 2.892\n\n >>> (\n ... x.pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... )\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n f\"{target} is both the pipe target and a keyword argument\"\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : bool, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range(\"1/1/2000\", \"31/12/2004\", freq=\"D\")],\n ... dims=\"time\",\n ... )\n >>> da\n <xarray.DataArray (time: 1827)>\n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03,\n 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 
2004-12-31\n >>> da.groupby(\"time.dayofyear\") - da.groupby(\"time.dayofyear\").mean(\"time\")\n <xarray.DataArray (time: 1827)>\n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2004-12-31\n dayofyear (time) int64 1 2 3 4 5 6 7 8 ... 359 360 361 362 363 364 365 366\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n # While we don't generally check the type of every arg, passing\n # multiple dimensions as multiple arguments is common enough, and the\n # consequences hidden enough (strings evaluate as true) to warrant\n # checking here.\n # A future version could make squeeze kwarg only, but would face\n # backward-compat issues.\n if not isinstance(squeeze, bool):\n raise TypeError(\n f\"`squeeze` must be True or False, but {squeeze} was supplied\"\n )\n\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )\n\n def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array-like\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : bool, default: True\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array-like or bool, default: None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : bool, default: True\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. 
[1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )\n\n def weighted(self: T_DataWithCoords, weights: DataArray) -> Weighted[T_Xarray]:\n \"\"\"\n Weighted operations.\n\n Parameters\n ----------\n weights : DataArray\n An array of weights associated with the values in this Dataset.\n Each value in the data contributes to the reduction operation\n according to its associated weight.\n\n Notes\n -----\n ``weights`` must be a DataArray and cannot contain missing values.\n Missing values can be replaced by ``weights.fillna(0)``.\n \"\"\"\n\n return self._weighted_cls(self, weights)\n\n def rolling(\n self,\n dim: Mapping[Any, int] = None,\n min_periods: int = None,\n center: bool | Mapping[Any, bool] = False,\n **window_kwargs: int,\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim : dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. `time`) to its moving window size.\n min_periods : int, default: None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : bool or mapping, default: False\n Set the labels at the center of the window.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n core.rolling.DataArrayRolling or core.rolling.DatasetRolling\n A rolling object (``DataArrayRolling`` for ``DataArray``,\n ``DatasetRolling`` for ``Dataset``)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\",\n ... periods=12,\n ... freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... )\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15\n >>> da.rolling(time=3, center=True).mean()\n <xarray.DataArray (time: 12)>\n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna(\"time\")\n <xarray.DataArray (time: 10)>\n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 ... 2000-10-15\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(self, dim, min_periods=min_periods, center=center)\n\n def rolling_exp(\n self,\n window: Mapping[Any, int] = None,\n window_type: str = \"span\",\n **window_kwargs,\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : mapping of hashable to int, optional\n A mapping from the name of the dimension to create the rolling\n exponential window along (e.g. 
`time`) to the size of the moving window.\n window_type : {\"span\", \"com\", \"halflife\", \"alpha\"}, default: \"span\"\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n\n if \"keep_attrs\" in window_kwargs:\n warnings.warn(\n \"Passing ``keep_attrs`` to ``rolling_exp`` has no effect. Pass\"\n \" ``keep_attrs`` directly to the applied function, e.g.\"\n \" ``rolling_exp(...).mean(keep_attrs=False)``.\"\n )\n\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return RollingExp(self, window, window_type)\n\n def coarsen(\n self,\n dim: Mapping[Any, int] = None,\n boundary: str = \"exact\",\n side: str | Mapping[Any, str] = \"left\",\n coord_func: str = \"mean\",\n **window_kwargs: int,\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim : mapping of hashable to int, optional\n Mapping from the dimension name to the window size.\n boundary : {\"exact\", \"trim\", \"pad\"}, default: \"exact\"\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. If 'pad', NA will be padded.\n side : {\"left\", \"right\"} or mapping of str to {\"left\", \"right\"}\n coord_func : str or mapping of hashable to str, default: \"mean\"\n function (name) that is applied to the coordinates,\n or a mapping from coordinate name to function (name).\n\n Returns\n -------\n core.rolling.DataArrayCoarsen or core.rolling.DatasetCoarsen\n A coarsen object (``DataArrayCoarsen`` for ``DataArray``,\n ``DatasetCoarsen`` for ``Dataset``)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 364, num=364),\n ... dims=\"time\",\n ... coords={\"time\": pd.date_range(\"15/12/1999\", periods=364)},\n ... )\n >>> da # +doctest: ELLIPSIS\n <xarray.DataArray (time: 364)>\n array([ 0. , 1.00275482, 2.00550964, 3.00826446,\n 4.01101928, 5.0137741 , 6.01652893, 7.01928375,\n 8.02203857, 9.02479339, 10.02754821, 11.03030303,\n ...\n 356.98071625, 357.98347107, 358.9862259 , 359.98898072,\n 360.99173554, 361.99449036, 362.99724518, 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>> da.coarsen(time=3, boundary=\"trim\").mean() # +doctest: ELLIPSIS\n <xarray.DataArray (time: 121)>\n array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821,\n 13.03581267, 16.04407713, 19.0523416 , 22.06060606,\n 25.06887052, 28.07713499, 31.08539945, 34.09366391,\n ...\n 349.96143251, 352.96969697, 355.97796143, 358.9862259 ,\n 361.99449036])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 
2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self,\n dim,\n boundary=boundary,\n side=side,\n coord_func=coord_func,\n )\n\n def resample(\n self,\n indexer: Mapping[Any, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str,\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. The resampled\n dimension must be a datetime-like coordinate. If any intervals\n contain no values from the original object, they will be given\n the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency [1]_. The\n dimension must be datetime-like.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : {\"left\", \"right\"}, optional\n Side of each interval to treat as closed.\n label : {\"left\", \"right\"}, optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for \"24H\" frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\",\n ... periods=12,\n ... freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... )\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15\n >>> da.resample(time=\"QS-DEC\").mean()\n <xarray.DataArray (time: 4)>\n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time=\"1D\").interpolate(\"linear\") # +doctest: ELLIPSIS\n <xarray.DataArray (time: 337)>\n array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226,\n 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258,\n 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 ,\n ...\n 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387,\n 10.96774194, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n Limit scope of upsampling method\n\n >>> da.resample(time=\"1D\").nearest(tolerance=\"1D\")\n <xarray.DataArray (time: 337)>\n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n See Also\n --------\n pandas.Series.resample\n pandas.DataFrame.resample\n\n References\n ----------\n .. 
[1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from ..coding.cftimeindex import CFTimeIndex\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n\n if keep_attrs is not None:\n warnings.warn(\n \"Passing ``keep_attrs`` to ``resample`` has no effect and will raise an\"\n \" error in xarray 0.20. Pass ``keep_attrs`` directly to the applied\"\n \" function, e.g. ``resample(...).mean(keep_attrs=True)``.\"\n )\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n # TODO: remove once pandas=1.1 is the minimum required version\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n r\"'(base|loffset)' in .resample\\(\\) and in Grouper\\(\\) is deprecated.\",\n category=FutureWarning,\n )\n\n # TODO (benbovy - flexible indexes): update when CFTimeIndex is an xarray Index subclass\n if isinstance(self.xindexes[dim_name].to_pandas_index(), CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler\n\n def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray, Dataset, or callable\n Locations at which to preserve this object's values. dtype must be `bool`.\n If a callable, it must expect this object as its only parameter.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : bool, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n DataArray or Dataset\n Same xarray type as caller, with dtype float64.\n\n Examples\n --------\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=(\"x\", \"y\"))\n >>> a\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [10., 11., nan, nan, nan],\n [15., nan, nan, nan, nan],\n [nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 5, -1)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [10., 11., nan, nan],\n [15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(lambda x: x.x + x.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [10., 11., nan, nan],\n [15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See Also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if callable(cond):\n cond = cond(self)\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n f\"cond argument is {cond!r} but must be a {Dataset!r} or {DataArray!r}\"\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)\n\n def set_close(self, close: Callable[[], None] | None) -> None:\n \"\"\"Register the function that releases any resources linked to this object.\n\n This method controls how xarray cleans up resources associated\n with this object when the ``.close()`` method is called. 
It is mostly\n intended for backend developers and it is rarely needed by regular\n end-users.\n\n Parameters\n ----------\n close : callable\n The function that when called like ``close()`` releases\n any resources linked to this object.\n \"\"\"\n self._close = close\n\n def close(self: Any) -> None:\n \"\"\"Release any resources linked to this object.\"\"\"\n if self._close is not None:\n self._close()\n self._close = None\n\n def isnull(self, keep_attrs: bool = None):\n \"\"\"Test each value in the array for whether it is a missing value.\n\n Returns\n -------\n isnull : DataArray or Dataset\n Same type and shape as object, but the dtype of the data is bool.\n\n See Also\n --------\n pandas.isnull\n\n Examples\n --------\n >>> array = xr.DataArray([1, np.nan, 3], dims=\"x\")\n >>> array\n <xarray.DataArray (x: 3)>\n array([ 1., nan, 3.])\n Dimensions without coordinates: x\n >>> array.isnull()\n <xarray.DataArray (x: 3)>\n array([False, True, False])\n Dimensions without coordinates: x\n \"\"\"\n from .computation import apply_ufunc\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n return apply_ufunc(\n duck_array_ops.isnull,\n self,\n dask=\"allowed\",\n keep_attrs=keep_attrs,\n )\n\n def notnull(self, keep_attrs: bool = None):\n \"\"\"Test each value in the array for whether it is not a missing value.\n\n Returns\n -------\n notnull : DataArray or Dataset\n Same type and shape as object, but the dtype of the data is bool.\n\n See Also\n --------\n pandas.notnull\n\n Examples\n --------\n >>> array = xr.DataArray([1, np.nan, 3], dims=\"x\")\n >>> array\n <xarray.DataArray (x: 3)>\n array([ 1., nan, 3.])\n Dimensions without coordinates: x\n >>> array.notnull()\n <xarray.DataArray (x: 3)>\n array([ True, False, True])\n Dimensions without coordinates: x\n \"\"\"\n from .computation import apply_ufunc\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n return apply_ufunc(\n duck_array_ops.notnull,\n self,\n dask=\"allowed\",\n keep_attrs=keep_attrs,\n )\n\n def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : DataArray or Dataset\n Has the same type and shape as this object, but with a bool dtype.\n\n Examples\n --------\n >>> array = xr.DataArray([1, 2, 3], dims=\"x\")\n >>> array.isin([1, 3])\n <xarray.DataArray (x: 3)>\n array([ True, False, True])\n Dimensions without coordinates: x\n\n See Also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )\n\n def astype(\n self: T,\n dtype,\n *,\n order=None,\n casting=None,\n subok=None,\n copy=None,\n keep_attrs=True,\n ) -> T:\n \"\"\"\n Copy of the xarray object, with data cast to a specified type.\n Leaves 
coordinate dtype unchanged.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout order of the result. ‘C’ means C order,\n ‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are\n Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to\n the order the array elements appear in memory as possible.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise the\n returned array will be forced to be a base-class array.\n copy : bool, optional\n By default, astype always returns a newly allocated array. If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n keep_attrs : bool, optional\n By default, astype keeps attributes. Set to False to remove\n attributes in the returned object.\n\n Returns\n -------\n out : same as object\n New object with data cast to the specified type.\n\n Notes\n -----\n The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed\n through to the ``astype`` method of the underlying array when a value\n different than ``None`` is supplied.\n Make sure to only supply these arguments if the underlying array class\n supports them.\n\n See Also\n --------\n numpy.ndarray.astype\n dask.array.Array.astype\n sparse.COO.astype\n \"\"\"\n from .computation import apply_ufunc\n\n kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n\n return apply_ufunc(\n duck_array_ops.astype,\n self,\n dtype,\n kwargs=kwargs,\n keep_attrs=keep_attrs,\n dask=\"allowed\",\n )\n\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()\n\n\n@overload\ndef full_like(\n other: Dataset,\n fill_value,\n dtype: DTypeLike | Mapping[Any, DTypeLike] = None,\n) -> Dataset:\n ...\n\n\n@overload\ndef full_like(other: DataArray, fill_value, dtype: DTypeLike = None) -> DataArray:\n ...\n\n\n@overload\ndef full_like(other: Variable, fill_value, dtype: DTypeLike = None) -> Variable:\n ...\n\n\ndef full_like(other, fill_value, dtype=None):\n \"\"\"Return a new object with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset or Variable\n The reference object in input\n fill_value : scalar or dict-like\n Value to fill the new object with before returning it. If\n other is a Dataset, may also be a dict-like mapping data\n variables to fill values.\n dtype : dtype or dict-like of dtype, optional\n dtype of the new array. If a dict-like, maps dtypes to\n variables. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. 
Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> ds = xr.Dataset(\n ... {\"a\": (\"x\", [3, 5, 2]), \"b\": (\"x\", [9, 1, 0])}, coords={\"x\": [2, 4, 6]}\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 2 4 6\n Data variables:\n a (x) int64 3 5 2\n b (x) int64 9 1 0\n >>> xr.full_like(ds, fill_value={\"a\": 1, \"b\": 2})\n <xarray.Dataset>\n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 2 4 6\n Data variables:\n a (x) int64 1 1 1\n b (x) int64 2 2 2\n >>> xr.full_like(ds, fill_value={\"a\": 1, \"b\": 2}, dtype={\"a\": bool, \"b\": float})\n <xarray.Dataset>\n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 2 4 6\n Data variables:\n a (x) bool True True True\n b (x) float64 2.0 2.0 2.0\n\n See Also\n --------\n zeros_like\n ones_like\n\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if not is_scalar(fill_value) and not (\n isinstance(other, Dataset) and isinstance(fill_value, dict)\n ):\n raise ValueError(\n f\"fill_value must be scalar or, for datasets, a dict-like. 
Received {fill_value} instead.\"\n )\n\n if not isinstance(other, Dataset) and isinstance(dtype, Mapping):\n raise ValueError(\n \"'dtype' cannot be dict-like when passing a DataArray or Variable\"\n )\n\n if isinstance(other, Dataset):\n if not isinstance(fill_value, dict):\n fill_value = {k: fill_value for k in other.data_vars.keys()}\n\n if not isinstance(dtype, Mapping):\n dtype_ = {k: dtype for k in other.data_vars.keys()}\n else:\n dtype_ = dtype\n\n data_vars = {\n k: _full_like_variable(v, fill_value.get(k, dtypes.NA), dtype_.get(k, None))\n for k, v in other.data_vars.items()\n }\n return Dataset(data_vars, coords=other.coords, attrs=other.attrs)\n elif isinstance(other, DataArray):\n return DataArray(\n _full_like_variable(other.variable, fill_value, dtype),\n dims=other.dims,\n coords=other.coords,\n attrs=other.attrs,\n name=other.name,\n )\n elif isinstance(other, Variable):\n return _full_like_variable(other, fill_value, dtype)\n else:\n raise TypeError(\"Expected DataArray, Dataset, or Variable\")\n\n\ndef _full_like_variable(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Inner function of full_like, where other must be a variable\"\"\"\n from .variable import Variable\n\n if fill_value is dtypes.NA:\n fill_value = dtypes.get_fill_value(dtype if dtype is not None else other.dtype)\n\n if is_duck_dask_array(other.data):\n import dask.array\n\n if dtype is None:\n dtype = other.dtype\n data = dask.array.full(\n other.shape, fill_value, dtype=dtype, chunks=other.data.chunks\n )\n else:\n data = np.full_like(other.data, fill_value, dtype=dtype)\n\n return Variable(dims=other.dims, data=data, attrs=other.attrs)\n\n\ndef zeros_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : DataArray, Dataset or Variable\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=float)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See Also\n --------\n ones_like\n full_like\n\n \"\"\"\n return full_like(other, 0, dtype)\n\n\ndef ones_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... 
dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.ones_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See Also\n --------\n zeros_like\n full_like\n\n \"\"\"\n return full_like(other, 1, dtype)\n\n\ndef get_chunksizes(\n variables: Iterable[Variable],\n) -> Mapping[Any, tuple[int, ...]]:\n\n chunks: dict[Any, tuple[int, ...]] = {}\n for v in variables:\n if hasattr(v.data, \"chunks\"):\n for dim, c in v.chunksizes.items():\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f\"Object has inconsistent chunks along dimension {dim}. \"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(chunks)\n\n\ndef is_np_datetime_like(dtype: DTypeLike) -> bool:\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)\n\n\ndef is_np_timedelta_like(dtype: DTypeLike) -> bool:\n \"\"\"Check whether dtype is of the timedelta64 dtype.\"\"\"\n return np.issubdtype(dtype, np.timedelta64)\n\n\ndef _contains_cftime_datetimes(array) -> bool:\n \"\"\"Check if an array contains cftime.datetime objects\"\"\"\n if cftime is None:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if is_duck_dask_array(sample):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime.datetime)\n else:\n return False\n\n\ndef contains_cftime_datetimes(var) -> bool:\n \"\"\"Check if an xarray.Variable contains cftime.datetime objects\"\"\"\n return _contains_cftime_datetimes(var.data)\n\n\ndef _contains_datetime_like_objects(var) -> bool:\n \"\"\"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n \"\"\"\n return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)\n"
]
| [
[
"numpy.asarray",
"pandas.Grouper",
"numpy.nonzero",
"numpy.issubdtype",
"numpy.dtype",
"numpy.unique",
"numpy.full_like"
]
]
|
dsctt/ray | [
"29d94a22114b02adfd3745c4991a3ce70592dd16"
]
| [
"python/ray/serve/tests/test_util.py"
]
| [
"import json\n\nimport numpy as np\nimport pytest\n\nfrom ray.serve.utils import ServeEncoder\n\n\ndef test_bytes_encoder():\n data_before = {\"inp\": {\"nest\": b\"bytes\"}}\n data_after = {\"inp\": {\"nest\": \"bytes\"}}\n assert json.loads(json.dumps(data_before, cls=ServeEncoder)) == data_after\n\n\ndef test_numpy_encoding():\n data = [1, 2]\n floats = np.array(data).astype(np.float32)\n ints = floats.astype(np.int32)\n uints = floats.astype(np.uint32)\n\n assert json.loads(json.dumps(floats, cls=ServeEncoder)) == data\n assert json.loads(json.dumps(ints, cls=ServeEncoder)) == data\n assert json.loads(json.dumps(uints, cls=ServeEncoder)) == data\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-s\", __file__]))\n"
]
| [
[
"numpy.array"
]
]
|
knu2xs/deep-learning-population-density | [
"7d359920b708c8fd0c03c2c44b1b3eb6224ec3c6"
]
| [
"src/deep_learning_population_density/__init__.py"
]
| [
"__title__ = 'deep-learning-population-density'\n__version__ = '0.0.0'\n__author__ = 'Esri Advanced Analytics'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright 2020 by Esri Advanced Analytics'\n\n__all__ = ['example_function', 'ExampleObject']\n\n# add specific imports below if you want to organize your code into modules, which is mostly what I do\n## from . import utils\n\nfrom typing import Union\nfrom pathlib import Path\n\nimport pandas as pd\n\n\ndef example_function(in_path: Union[str, Path]) -> pd.DataFrame:\n \"\"\"\n This is an example function, mostly to provide a template for properly\n structuring a function and docstring for both you, and also for myself,\n since I *almost always* have to look this up, and it's a *lot* easier\n for it to be already templated.\n\n Args:\n in_path: Required path to something you really care about, or at least\n want to exploit, a really big word used to simply say, *use*.\n\n Returns:\n Hypothetically, a Pandas Dataframe. Good luck with that.\n\n .. code-block:: python\n\n from deep_learning_population_density import example_function\n\n pth = r'C:/path/to/some/table.csv'\n\n df = example_function(pth)\n \"\"\"\n return pd.read_csv(in_path)\n\n\nclass ExampleObject(object):\n \"\"\"\n This is an example object, mostly to provide a template for properly\n structuring a function and docstring for both you, and also for myself,\n since I *almost always* have to look this up, and it's a *lot* easier\n for it to be already templated.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # is not applicable in all cases, but I always have to look it up, so it is here for simplicity's sake\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def example_static_function(in_path: Union[str, Path]) -> pd.DataFrame:\n \"\"\"\n This is an example function, mostly to provide a template for properly\n structuring a function and docstring for both you, and also for myself,\n since I *almost always* have to look this up, and it's a *lot* easier\n for it to be already templated.\n\n Args:\n in_path: Required path to something you really care about, or at least\n want to exploit, a really big word used to simply say, *use*.\n\n Returns:\n Hypothetically, a Pandas Dataframe. Good luck with that.\n\n .. code-block:: python\n\n from deep_learning_population_density import ExampleObject\n\n pth = r'C:/path/to/some/table.csv'\n\n df = ExampleObject.example_function(pth)\n \"\"\"\n return pd.read_csv(in_path)\n\n @classmethod\n def example_class_method(cls):\n \"\"\"\n Class methods prove really useful for when you need a method to\n return an instance of the parent class. Again, I usually have to\n search for how to do this, so I also just put it in here.\n\n Returns:\n An instance of the class, duh!\n\n .. code-block:: python\n\n from from deep_learning_population_density import ExampleObject\n\n pth = r'C:/path/to/some/table.csv'\n\n obj_inst = ExampleObject.example_class_method()\n\n df = obj_inst.example_function(pth)\n \"\"\"\n return cls()\n"
]
| [
[
"pandas.read_csv"
]
]
|
robfiras/mushroom-rl | [
"b04026fdb6aa0ea4878a410f8b745f28e71d4fef"
]
| [
"mushroom_rl/algorithms/value/dqn/rainbow.py"
]
| [
"from copy import deepcopy\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mushroom_rl.algorithms.value.dqn import AbstractDQN\nfrom mushroom_rl.algorithms.value.dqn.categorical_dqn import categorical_loss\nfrom mushroom_rl.algorithms.value.dqn.noisy_dqn import NoisyNetwork\nfrom mushroom_rl.approximators.parametric.torch_approximator import *\nfrom mushroom_rl.utils.replay_memory import PrioritizedReplayMemory\n\n\nclass RainbowNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, features_network, n_atoms,\n v_min, v_max, n_features, use_cuda, sigma_coeff, **kwargs):\n super().__init__()\n\n self._n_output = output_shape[0]\n self._phi = features_network(input_shape, (n_features,),\n n_features=n_features, **kwargs)\n self._n_atoms = n_atoms\n self._v_min = v_min\n self._v_max = v_max\n\n delta = (self._v_max - self._v_min) / (self._n_atoms - 1)\n self._a_values = torch.arange(self._v_min, self._v_max + delta, delta)\n if use_cuda:\n self._a_values = self._a_values.cuda()\n\n self._pv = NoisyNetwork.NoisyLinear(n_features, n_atoms, use_cuda, sigma_coeff)\n self._pa = nn.ModuleList(\n [NoisyNetwork.NoisyLinear(n_features, n_atoms, use_cuda, sigma_coeff) for _ in range(self._n_output)])\n\n def forward(self, state, action=None, get_distribution=False):\n features = self._phi(state)\n\n a_pv = self._pv(features)\n a_pa = [self._pa[i](features) for i in range(self._n_output)]\n a_pa = torch.stack(a_pa, dim=1)\n a_pv = a_pv.unsqueeze(1).repeat(1, self._n_output, 1)\n mean_a_pa = a_pa.mean(1, keepdim=True).repeat(1, self._n_output, 1)\n softmax = F.softmax(a_pv + a_pa - mean_a_pa, dim=-1)\n\n if not get_distribution:\n q = torch.empty(softmax.shape[:-1])\n for i in range(softmax.shape[0]):\n q[i] = softmax[i] @ self._a_values\n\n if action is not None:\n return torch.squeeze(q.gather(1, action))\n else:\n return q\n else:\n if action is not None:\n action = torch.unsqueeze(\n action.long(), 2).repeat(1, 1, self._n_atoms)\n\n return torch.squeeze(softmax.gather(1, action))\n else:\n return softmax\n\n\nclass Rainbow(AbstractDQN):\n \"\"\"\n Rainbow algorithm.\n \"Rainbow: Combinining Improvements in Deep Reinforcement Learning\".\n Hessel M. et al.. 
2018.\n\n \"\"\"\n def __init__(self, mdp_info, policy, approximator_params, n_atoms, v_min,\n v_max, n_steps_return, alpha_coeff, beta, sigma_coeff=.5,\n **params):\n \"\"\"\n Constructor.\n\n Args:\n n_atoms (int): number of atoms;\n v_min (float): minimum value of value-function;\n v_max (float): maximum value of value-function;\n n_steps_return (int): the number of steps to consider to compute the n-return;\n alpha_coeff (float): prioritization exponent for prioritized experience replay;\n beta (Parameter): importance sampling coefficient for prioritized experience replay;\n sigma_coeff (float, .5): sigma0 coefficient for noise initialization in noisy layers.\n\n \"\"\"\n features_network = approximator_params['network']\n params['approximator_params'] = deepcopy(approximator_params)\n params['approximator_params']['network'] = RainbowNetwork\n params['approximator_params']['features_network'] = features_network\n params['approximator_params']['n_atoms'] = n_atoms\n params['approximator_params']['v_min'] = v_min\n params['approximator_params']['v_max'] = v_max\n params['approximator_params']['sigma_coeff'] = sigma_coeff\n params['approximator_params']['loss'] = categorical_loss\n\n self._n_atoms = n_atoms\n self._v_min = v_min\n self._v_max = v_max\n self._delta = (v_max - v_min) / (n_atoms - 1)\n self._a_values = np.arange(v_min, v_max + self._delta, self._delta)\n self._n_steps_return = n_steps_return\n self._sigma_coeff = sigma_coeff\n\n params['replay_memory'] = PrioritizedReplayMemory(\n params['initial_replay_size'], params['max_replay_size'], alpha=alpha_coeff,\n beta=beta\n )\n\n self._add_save_attr(\n _n_atoms='primitive',\n _v_min='primitive',\n _v_max='primitive',\n _delta='primitive',\n _a_values='numpy',\n _n_steps_return='primitive',\n _sigma_coeff='primitive'\n )\n\n super().__init__(mdp_info, policy, TorchApproximator, **params)\n\n def fit(self, dataset):\n self._replay_memory.add(dataset, np.ones(len(dataset)) * self._replay_memory.max_priority,\n n_steps_return=self._n_steps_return, gamma=self.mdp_info.gamma)\n if self._replay_memory.initialized:\n state, action, reward, next_state, absorbing, _, idxs, is_weight = \\\n self._replay_memory.get(self._batch_size())\n\n if self._clip_reward:\n reward = np.clip(reward, -1, 1)\n\n q_next = self.approximator.predict(next_state, **self._predict_params)\n a_max = np.argmax(q_next, axis=1)\n gamma = self.mdp_info.gamma ** self._n_steps_return * (1 - absorbing)\n p_next = self.target_approximator.predict(next_state, a_max,\n get_distribution=True, **self._predict_params)\n gamma_z = gamma.reshape(-1, 1) * np.expand_dims(\n self._a_values, 0).repeat(len(gamma), 0)\n bell_a = (reward.reshape(-1, 1) + gamma_z).clip(self._v_min,\n self._v_max)\n\n b = (bell_a - self._v_min) / self._delta\n l = np.floor(b).astype(int)\n u = np.ceil(b).astype(int)\n\n m = np.zeros((self._batch_size.get_value(), self._n_atoms))\n for i in range(self._n_atoms):\n l[:, i][(u[:, i] > 0) * (l[:, i] == u[:, i])] -= 1\n u[:, i][(l[:, i] < (self._n_atoms - 1)) * (l[:, i] == u[:, i])] += 1\n\n m[np.arange(len(m)), l[:, i]] += p_next[:, i] * (u[:, i] - b[:, i])\n m[np.arange(len(m)), u[:, i]] += p_next[:, i] * (b[:, i] - l[:, i])\n\n kl = -np.sum(m * np.log(self.approximator.predict(state, action, get_distribution=True,\n **self._predict_params).clip(1e-5)), 1)\n self._replay_memory.update(kl, idxs)\n\n self.approximator.fit(state, action, m, weights=is_weight,\n get_distribution=True, **self._fit_params)\n\n self._n_updates += 1\n\n if self._n_updates % 
self._target_update_frequency == 0:\n self._update_target()\n"
]
| [
[
"torch.nn.functional.softmax"
]
]
|
bryanwweber/poliastro | [
"09bf683c62cc2a3454622bb8c0eb071966953a92"
]
| [
"tests/tests_twobody/test_events.py"
]
| [
"import numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.time import Time\nfrom numpy.linalg import norm\n\nfrom poliastro.bodies import Earth\nfrom poliastro.constants import H0_earth, rho0_earth\nfrom poliastro.core.events import line_of_sight\nfrom poliastro.core.perturbations import atmospheric_drag_exponential\nfrom poliastro.core.propagation import func_twobody\nfrom poliastro.twobody import Orbit\nfrom poliastro.twobody.events import (\n AltitudeCrossEvent,\n LatitudeCrossEvent,\n LithobrakeEvent,\n LosEvent,\n NodeCrossEvent,\n PenumbraEvent,\n UmbraEvent,\n)\nfrom poliastro.twobody.propagation import cowell\n\n\[email protected]\ndef test_altitude_crossing():\n # Test decreasing altitude cross over Earth. No analytic solution.\n R = Earth.R.to(u.km).value\n\n orbit = Orbit.circular(Earth, 230 * u.km)\n t_flight = 48.209538 * u.d\n\n # Parameters of a body\n C_D = 2.2 # Dimensionless (any value would do)\n A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(\n u.km ** 2 / u.kg\n ) # km^2/kg\n\n # Parameters of the atmosphere\n rho0 = rho0_earth.to(u.kg / u.km ** 3).value # kg/km^3\n H0 = H0_earth.to(u.km).value # km\n\n tofs = [50] * u.d\n\n thresh_alt = 50 # km\n altitude_cross_event = AltitudeCrossEvent(thresh_alt, R)\n events = [altitude_cross_event]\n\n def f(t0, u_, k):\n du_kep = func_twobody(t0, u_, k)\n ax, ay, az = atmospheric_drag_exponential(\n t0, u_, k, R=R, C_D=C_D, A_over_m=A_over_m, H0=H0, rho0=rho0\n )\n du_ad = np.array([0, 0, 0, ax, ay, az])\n return du_kep + du_ad\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n f=f,\n )\n\n assert_quantity_allclose(norm(rr[0].to(u.km).value) - thresh_alt, R)\n assert_quantity_allclose(altitude_cross_event.last_t, t_flight, rtol=1e-2)\n\n\ndef test_altitude_cross_not_happening_is_ok():\n R = Earth.R.to(u.km).value\n\n orbit = Orbit.circular(Earth, 230 * u.km)\n\n tofs = [25] * u.d\n\n thresh_alt = 50 # km\n altitude_cross_event = AltitudeCrossEvent(thresh_alt, R)\n events = [altitude_cross_event]\n\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n assert altitude_cross_event.last_t == tofs[-1]\n\n\ndef test_latitude_cross_event():\n r = [-6142438.668, 3492467.56, -25767.257] << u.km\n v = [505.848, 942.781, 7435.922] << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r, v)\n\n thresh_lat = 60 * u.deg\n latitude_cross_event = LatitudeCrossEvent(orbit, thresh_lat, terminal=True)\n t_lat = 1701.716842130476 * u.s\n\n tofs = [5] * u.d\n\n events = [latitude_cross_event]\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n assert_quantity_allclose(latitude_cross_event.last_t, t_lat)\n\n\ndef test_penumbra_event_not_triggering_is_ok():\n attractor = Earth\n tof = 100 * u.s\n r0 = np.array([281.89, 1411.473, 750.672])\n v0 = np.array([7.36138, 2.98997, 1.64354])\n orbit = Orbit.from_vectors(attractor, r0 * u.km, v0 * u.km / u.s)\n\n penumbra_event = PenumbraEvent(orbit)\n events = [penumbra_event]\n\n rr, _ = cowell(\n attractor.k,\n orbit.r,\n orbit.v,\n [tof] * u.s,\n events=events,\n )\n\n assert penumbra_event.last_t == tof\n\n\ndef test_umbra_event_not_triggering_is_ok():\n attractor = Earth\n tof = 100 * u.s\n r0 = np.array([281.89, 1411.473, 750.672])\n v0 = np.array([7.36138, 2.98997, 1.64354])\n orbit = Orbit.from_vectors(attractor, r0 * u.km, v0 * u.km / u.s)\n\n umbra_event = UmbraEvent(orbit)\n events = [umbra_event]\n\n rr, _ = 
cowell(\n attractor.k,\n orbit.r,\n orbit.v,\n [tof] * u.s,\n events=events,\n )\n\n assert umbra_event.last_t == tof\n\n\ndef test_umbra_event_crossing():\n expected_umbra_t = Time(\"2020-01-01 00:04:51.328\", scale=\"utc\") # From Orekit.\n attractor = Earth\n tof = 2 * u.d\n epoch = Time(\"2020-01-01\", scale=\"utc\")\n orbit = Orbit.from_classical(\n attractor=attractor,\n a=6828137.0 * u.m,\n ecc=0.0073 * u.one,\n inc=87.0 * u.deg,\n raan=20.0 * u.deg,\n argp=10.0 * u.deg,\n nu=0 * u.deg,\n epoch=epoch,\n )\n\n umbra_event = UmbraEvent(orbit, terminal=True)\n events = [umbra_event]\n\n rr, _ = cowell(\n attractor.k,\n orbit.r,\n orbit.v,\n [tof] * u.s,\n events=events,\n )\n\n assert expected_umbra_t.isclose(epoch + umbra_event.last_t, atol=1 * u.s)\n\n\ndef test_penumbra_event_crossing():\n expected_penumbra_t = Time(\"2020-01-01 00:04:26.060\", scale=\"utc\") # From Orekit.\n attractor = Earth\n tof = 2 * u.d\n epoch = Time(\"2020-01-01\", scale=\"utc\")\n orbit = Orbit.from_classical(\n attractor=attractor,\n a=6828137.0 * u.m,\n ecc=0.0073 * u.one,\n inc=87.0 * u.deg,\n raan=20.0 * u.deg,\n argp=10.0 * u.deg,\n nu=0 * u.deg,\n epoch=epoch,\n )\n\n penumbra_event = PenumbraEvent(orbit, terminal=True)\n events = [penumbra_event]\n\n rr, _ = cowell(\n attractor.k,\n orbit.r,\n orbit.v,\n [tof] * u.s,\n events=events,\n )\n\n assert expected_penumbra_t.isclose(epoch + penumbra_event.last_t, atol=1 * u.s)\n\n\ndef test_node_cross_event():\n t_node = 3.46524036 * u.s\n r = [-6142438.668, 3492467.56, -25767.257] << u.km\n v = [505.848, 942.781, 7435.922] << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r, v)\n\n node_event = NodeCrossEvent(terminal=True)\n events = [node_event]\n\n tofs = [0.01, 0.1, 0.5, 0.8, 1, 3, 5, 6, 10, 15] << u.s\n rr, vv = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n assert_quantity_allclose(node_event.last_t, t_node)\n\n\ndef test_node_event_equatorial_orbit():\n node_event = NodeCrossEvent(terminal=True)\n events = [node_event]\n\n r = np.array([9946.2, 1035.4, 0.0])\n v = np.array([7.0, -0.1, 0.0])\n orb = Orbit.from_vectors(Earth, r * u.km, v * u.km / u.s)\n\n tofs = [5, 10, 50] << u.s\n rr, vv = cowell(\n Earth.k,\n orb.r,\n orb.v,\n tofs,\n events=events,\n )\n\n assert_quantity_allclose(node_event.last_t, 0.0 * u.s, atol=1e-1 * u.s)\n\n\ndef test_orbit_propagation_continues_if_events_terminal_is_False():\n r = [-6142438.668, 3492467.56, -25767.257] << u.km\n v = [505.848, 942.781, 7435.922] << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r, v)\n\n thresh_lat = 60 * u.deg\n # Event occurs at ~1701.7 s.\n latitude_cross_event = LatitudeCrossEvent(orbit, thresh_lat, terminal=False)\n events = [latitude_cross_event]\n\n # The last two tofs are after the detection of the event.\n tofs = [1000, 1250, 1500, 1710, 2000] << u.s\n rr, _ = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n # Check position vectors don't repeat during propagation.\n assert not np.allclose(rr[-1], rr[-2])\n\n\ndef test_orbit_propagation_position_vector_does_not_repeat_if_events_terminal_is_True():\n r = [-6142438.668, 3492467.56, -25767.257] << u.km\n v = [505.848, 942.781, 7435.922] << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r, v)\n\n thresh_lat = 60 * u.deg\n # Event occurs at ~1701.7 s.\n latitude_cross_event = LatitudeCrossEvent(orbit, thresh_lat, terminal=True)\n events = [latitude_cross_event]\n\n # The last two tofs are after the detection of the event.\n tofs = [1000, 1250, 1500, 1710, 2000] << u.s\n rr, _ = 
cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n # Check position vector doesn't repeat if terminal set to True.\n assert len(rr) == 4 # From the 5th tof in tofs, position vector starts repeating.\n\n\[email protected](\n \"latitude_terminal,penumbra_terminal,rr_length,t_end\",\n [\n (True, True, 4, 266.15058 * u.s),\n (True, False, 5, 305.65173 * u.s),\n (False, True, 4, 266.15058 * u.s),\n (False, False, 6, 500 * u.s),\n ],\n)\ndef test_propagation_stops_if_atleast_one_event_has_terminal_set_to_True(\n latitude_terminal, penumbra_terminal, rr_length, t_end\n):\n # Penumbra occurs at 266.15058s and latitude event occurs at 305.65173s.\n # `terminals` is for latitude event and penumbra event, in that order.\n attractor = Earth\n tofs = [50, 100, 150, 300, 400, 500] << u.s\n epoch = Time(\"2020-01-01\", scale=\"utc\")\n orbit = Orbit.from_classical(\n attractor=attractor,\n a=6828137.0 * u.m,\n ecc=0.0073 * u.one,\n inc=87.0 * u.deg,\n raan=20.0 * u.deg,\n argp=10.0 * u.deg,\n nu=0 * u.deg,\n epoch=epoch,\n )\n\n penumbra_event = PenumbraEvent(orbit, terminal=penumbra_terminal)\n\n thresh_lat = 30 * u.deg\n latitude_cross_event = LatitudeCrossEvent(\n orbit, thresh_lat, terminal=latitude_terminal\n )\n events = [penumbra_event, latitude_cross_event]\n\n rr, _ = cowell(\n attractor.k,\n orbit.r,\n orbit.v,\n tofs,\n events=events,\n )\n\n assert len(rr) == rr_length\n if penumbra_terminal:\n assert_quantity_allclose(penumbra_event.last_t, t_end)\n elif latitude_terminal and not penumbra_terminal:\n assert_quantity_allclose(latitude_cross_event.last_t, t_end)\n else:\n assert_quantity_allclose(t_end, tofs[-1])\n\n\ndef test_line_of_sight():\n # From Vallado example 5.6\n r1 = np.array([0, -4464.696, -5102.509]) << u.km\n r2 = np.array([0, 5740.323, 3189.068]) << u.km\n r_sun = np.array([122233179, -76150708, 33016374]) << u.km\n R = Earth.R.to(u.km).value\n\n los = line_of_sight(r1.value, r2.value, R)\n los_with_sun = line_of_sight(r1.value, r_sun.value, R)\n\n assert los < 0 # No LOS condition.\n assert los_with_sun >= 0 # LOS condition.\n\n\ndef test_LOS_event_raises_warning_if_norm_of_r1_less_than_attractor_radius_during_propagation():\n r2 = np.array([-500, 1500, 4012.09]) << u.km\n v2 = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r2, v2)\n\n tofs = [100, 500, 1000, 2000] << u.s\n # Propagate the secondary body to generate its position coordinates.\n rr, vv = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n )\n pos_coords = rr # Trajectory of the secondary body.\n\n r1 = (\n np.array([0, -5010.696, -5102.509]) << u.km\n ) # This position vectors' norm gets less than attractor radius.\n v1 = np.array([736.138, 29899.7, 164.354]) << u.km / u.s\n orb = Orbit.from_vectors(Earth, r1, v1)\n\n los_event = LosEvent(Earth, pos_coords, terminal=True)\n events = [los_event]\n tofs = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.5] << u.s\n\n with pytest.warns(UserWarning, match=\"The norm of the position vector\"):\n r, v = cowell(\n Earth.k,\n orb.r,\n orb.v,\n tofs,\n events=events,\n )\n\n\[email protected](\"ignore::UserWarning\")\ndef test_LOS_event_with_lithobrake_event_raises_warning_when_satellite_cuts_attractor():\n r2 = np.array([-500, 1500, 4012.09]) << u.km\n v2 = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r2, v2)\n\n tofs = [100, 500, 1000, 2000] << u.s\n # Propagate the secondary body to generate its position coordinates.\n rr, vv = cowell(\n Earth.k,\n orbit.r,\n 
orbit.v,\n tofs,\n )\n pos_coords = rr # Trajectory of the secondary body.\n\n r1 = np.array([0, -5010.696, -5102.509]) << u.km\n v1 = np.array([736.138, 2989.7, 164.354]) << u.km / u.s\n orb = Orbit.from_vectors(Earth, r1, v1)\n\n los_event = LosEvent(Earth, pos_coords, terminal=True)\n tofs = [0.003, 0.004, 0.01, 0.02, 0.03, 0.04, 0.07, 0.1, 0.2, 0.3, 0.4, 1, 3] << u.s\n\n lithobrake_event = LithobrakeEvent(Earth.R.to_value(u.km))\n events = [lithobrake_event, los_event]\n r, v = cowell(\n Earth.k,\n orb.r,\n orb.v,\n tofs,\n events=events,\n )\n\n assert lithobrake_event.last_t < los_event.last_t\n\n\ndef test_LOS_event():\n t_los = 2327.165 * u.s\n r2 = np.array([-500, 1500, 4012.09]) << u.km\n v2 = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s\n orbit = Orbit.from_vectors(Earth, r2, v2)\n\n tofs = [100, 500, 1000, 2000] << u.s\n # Propagate the secondary body to generate its position coordinates.\n rr, vv = cowell(\n Earth.k,\n orbit.r,\n orbit.v,\n tofs,\n )\n pos_coords = rr # Trajectory of the secondary body.\n\n orb = Orbit.from_classical(\n attractor=Earth,\n a=16000 * u.km,\n ecc=0.53 * u.one,\n inc=5 * u.deg,\n raan=5 * u.deg,\n argp=10 * u.deg,\n nu=30 * u.deg,\n )\n\n los_event = LosEvent(Earth, pos_coords, terminal=True)\n events = [los_event]\n tofs = [1, 5, 10, 100, 1000, 2000, 3000, 5000] << u.s\n\n r, v = cowell(\n Earth.k,\n orb.r,\n orb.v,\n tofs,\n events=events,\n )\n\n assert_quantity_allclose(los_event.last_t, t_los)\n"
]
| [
[
"numpy.allclose",
"numpy.array"
]
]
|
pedrobcst/Xerus | [
"09df088e0207176df0d20715e1c9778d09d28250"
]
| [
"Xerus/GSASII/exports/G2export_csv.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n########### SVN repository information ###################\n# $Date: 2020-08-20 04:53:47 +0900 (木, 20 8月 2020) $\n# $Author: vondreele $\n# $Revision: 4549 $\n# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/exports/G2export_csv.py $\n# $Id: G2export_csv.py 4549 2020-08-19 19:53:47Z vondreele $\n########### SVN repository information ###################\n'''\n*Module G2export_csv: Spreadsheet export*\n-------------------------------------------\n\nCode to create .csv (comma-separated variable) files for\nGSAS-II data export to a spreadsheet program, etc.\n\n'''\nfrom __future__ import division, print_function\nimport os.path\nimport numpy as np\nimport GSASIIpath\nGSASIIpath.SetVersionNumber(\"$Revision: 4549 $\")\nimport GSASIIIO as G2IO\nimport GSASIIpy3 as G2py3\nimport GSASIIobj as G2obj\nimport GSASIImath as G2mth\nimport GSASIIpwd as G2pwd\nimport GSASIIlattice as G2lat\n\ndef WriteList(obj,headerItems):\n '''Write a CSV header\n\n :param object obj: Exporter object\n :param list headerItems: items to write as a header\n '''\n line = ''\n for lbl in headerItems:\n if line: line += ','\n line += '\"'+lbl+'\"'\n obj.Write(line)\n\nclass ExportPhaseCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file for a phase\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'CSV file',\n extension='.csv',\n longFormatName = 'Export phase as comma-separated (csv) file'\n )\n self.exporttype = ['phase']\n self.multiple = True # allow multiple phases to be selected\n\n def Writer(self,hist,phasenam,mode='w'):\n self.OpenFile(mode=mode)\n # test for aniso atoms\n aniso = False\n AtomsList = self.GetAtoms(phasenam)\n for lbl,typ,mult,xyz,td in AtomsList:\n if len(td) != 1:\n aniso = True\n break\n if mode == 'w':\n lbllist = ['hist','phase','a','b','c','alpha','beta','gamma','volume']\n lbllist += [\"atm label\",\"elem\",\"mult\",\"x\",\"y\",\"z\",\"frac\",\"Uiso\"]\n if aniso: lbllist += ['U11','U22','U33','U12','U13','U23']\n WriteList(self,lbllist)\n \n cellList,cellSig = self.GetCell(phasenam)\n line = '\"' + str(hist)+ '\",\"' + str(phasenam) + '\"'\n for defsig,val in zip(\n 3*[-0.00001] + 3*[-0.001] + [-0.01], # sets sig. 
figs.\n cellList\n ):\n txt = G2mth.ValEsd(val,defsig)\n if line: line += ','\n line += txt\n self.Write(line)\n\n # get atoms and print separated by commas\n AtomsList = self.GetAtoms(phasenam)\n for lbl,typ,mult,xyz,td in AtomsList:\n line = \",,,,,,,,,\"\n line += '\"' + lbl + '\",\"' + typ + '\",' + str(mult) + ','\n for val,sig in xyz:\n line += G2mth.ValEsd(val,-abs(sig))\n line += \",\"\n if len(td) == 1:\n line += G2mth.ValEsd(td[0][0],-abs(td[0][1]))\n else:\n line += \",\"\n for val,sig in td:\n line += G2mth.ValEsd(val,-abs(sig))\n line += \",\"\n self.Write(line)\n\n if mode == 'w':\n print('Phase '+phasenam+' written to file '+self.fullpath)\n self.CloseFile()\n \n def Exporter(self,event=None):\n '''Export a phase as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n # create a dict with refined values and their uncertainties\n self.loadParmDict()\n if self.ExportSelect(): return # set export parameters; get file name\n self.OpenFile()\n # if more than one phase is selected, put them into a single file\n for phasenam in self.phasenam:\n phasedict = self.Phases[phasenam] # pointer to current phase info \n i = self.Phases[phasenam]['pId']\n self.Write('\"'+\"Phase \"+str(phasenam)+\" from \"+str(self.G2frame.GSASprojectfile)+'\"')\n self.Write('\\n\"Space group:\",\"'+str(phasedict['General']['SGData']['SpGrp'].strip())+'\"')\n # get cell parameters & print them\n cellList,cellSig = self.GetCell(phasenam)\n WriteList(self,['a','b','c','alpha','beta','gamma','volume'])\n\n line = ''\n for defsig,val in zip(\n 3*[-0.00001] + 3*[-0.001] + [-0.01], # sign values to use when no sigma\n cellList\n ):\n txt = G2mth.ValEsd(val,defsig)\n if line: line += ','\n line += txt\n self.Write(line)\n \n # get atoms and print separated by commas\n AtomsList = self.GetAtoms(phasenam)\n # check for aniso atoms\n aniso = False\n for lbl,typ,mult,xyz,td in AtomsList:\n if len(td) != 1: aniso = True \n lbllist = [\"label\",\"elem\",\"mult\",\"x\",\"y\",\"z\",\"frac\",\"Uiso\"]\n if aniso: lbllist += ['U11','U22','U33','U12','U13','U23']\n WriteList(self,lbllist)\n \n for lbl,typ,mult,xyz,td in AtomsList:\n line = '\"' + lbl + '\",\"' + typ + '\",' + str(mult) + ','\n for val,sig in xyz:\n line += G2mth.ValEsd(val,-abs(sig))\n line += \",\"\n if len(td) == 1:\n line += G2mth.ValEsd(td[0][0],-abs(td[0][1]))\n else:\n line += \",\"\n for val,sig in td:\n line += G2mth.ValEsd(val,-abs(sig))\n line += \",\"\n self.Write(line)\n print('Phase '+phasenam+' written to file '+self.fullpath)\n self.CloseFile()\n\nclass ExportPowderCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file for a powder data set\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'histogram CSV file',\n extension='.csv',\n longFormatName = 'Export powder data as comma-separated (csv) file'\n )\n self.exporttype = ['powder']\n #self.multiple = False # only allow one histogram to be selected\n self.multiple = True\n\n def Writer(self,TreeName,filename=None):\n #print filename\n self.OpenFile(filename)\n histblk = self.Histograms[TreeName]\n Parms = self.Histograms[TreeName]['Instrument Parameters'][0]\n for parm in Parms:\n if parm in ['Type','Source',]:\n line = '\"Instparm: %s\",\"%s\"'%(parm,Parms[parm][0])\n elif parm in ['Lam','Zero',]:\n line = '\"Instparm: 
%s\",%10.6f'%(parm,Parms[parm][1])\n else:\n line = '\"Instparm: %s\",%10.2f'%(parm,Parms[parm][1])\n self.Write(line)\n Samp = self.Histograms[TreeName]['Sample Parameters']\n for samp in Samp:\n if samp in ['InstrName','Type']:\n line = '\"Samparm: %s\",%s'%(samp,Samp[samp])\n elif samp in ['Azimuth','Chi','Gonio. radius','Omega','Phi','Pressure','Temperature','Time']:\n line = '\"Samparm: %s\",%10.2f'%(samp,Samp[samp])\n elif samp in ['DisplaceX','DisplaceY','Scale','Shift','SurfRoughA','SurfRoughB','Transparency']:\n line = '\"Samparm: %s\",%10.2f'%(samp,Samp[samp][0])\n else:\n continue\n self.Write(line)\n WriteList(self,(\"x\",\"y_obs\",\"weight\",\"y_calc\",\"y_bkg\",\"Q\"))\n digitList = 2*((13,3),) + ((13,5),) + 3*((13,3),)\n for vallist in zip(histblk['Data'][0],\n histblk['Data'][1],\n histblk['Data'][2],\n histblk['Data'][3],\n histblk['Data'][4],\n #histblk['Data'][5],\n 2*np.pi/G2lat.Pos2dsp(Parms,histblk['Data'][0])\n ):\n line = \"\"\n for val,digits in zip(vallist,digitList):\n if line: line += ','\n line += '%.6g'%val\n# line += G2py3.FormatValue(val,digits)\n self.Write(line)\n self.CloseFile()\n \n def Exporter(self,event=None):\n '''Export a set of powder data as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect( # set export parameters\n AskFile='single' # get a file name/directory to save in\n ): return\n filenamelist = []\n for hist in self.histnam:\n if len(self.histnam) == 1:\n name = self.filename\n else: # multiple files: create a unique name from the histogram\n name = self.MakePWDRfilename(hist)\n fileroot = os.path.splitext(G2obj.MakeUniqueLabel(name,filenamelist))[0]\n # create the file\n self.filename = os.path.join(self.dirname,fileroot + self.extension)\n self.Writer(hist)\n print('Histogram '+hist+' written to file '+self.fullpath)\n\nclass ExportMultiPowderCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file for a stack of powder data sets suitable for display \n purposes only; no y-calc or weights are exported only x & y-obs\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'stacked CSV file',\n extension='.csv',\n longFormatName = 'Export powder data sets as a (csv) file - x,y-o1,y-o2,... 
only'\n )\n self.exporttype = ['powder']\n #self.multiple = False # only allow one histogram to be selected\n self.multiple = True\n\n def Exporter(self,event=None):\n '''Export a set of powder data as a single csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect( # set export parameters\n AskFile='ask' # only one file is ever written\n ): return\n csvData = []\n headList = [\"x\",]\n digitList = []\n self.filename = os.path.join(self.dirname,os.path.splitext(self.filename)[0]\n + self.extension)\n for ihst,hist in enumerate(self.histnam):\n histblk = self.Histograms[hist]\n headList.append('y_obs_'+G2obj.StripUnicode(hist[5:].replace(' ','_')))\n if not ihst:\n digitList = [(13,3),]\n csvData.append(histblk['Data'][0])\n digitList += [(13,3),]\n csvData.append(histblk['Data'][1])\n print('Histogram '+hist+' added to file...')\n self.OpenFile()\n WriteList(self,headList)\n for vallist in np.array(csvData).T:\n line = \"\"\n for val,digits in zip(vallist,digitList):\n if line: line += ','\n line += '%.6g'%val\n# line += G2py3.FormatValue(val,digits)\n self.Write(line)\n self.CloseFile()\n print('...file '+self.fullpath+' written')\n\nclass ExportPowderReflCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file of reflections from a powder data set\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'reflection list as CSV',\n extension='.csv',\n longFormatName = 'Export powder reflection list as a comma-separated (csv) file'\n )\n self.exporttype = ['powder']\n self.multiple = False # only allow one histogram to be selected\n\n def Writer(self,TreeName,filename=None):\n print(filename)\n self.OpenFile(filename)\n histblk = self.Histograms[TreeName]\n self.write(TreeName,histblk)\n self.CloseFile()\n print(TreeName+' reflections written to file '+self.fullpath)\n \n def Exporter(self,event=None):\n '''Export a set of powder reflections as a csv file\n '''\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect(): return # set export parameters, get file name\n hist = list(self.histnam)[0] # there should only be one histogram, in any case take the 1st\n histblk = self.Histograms[hist]\n self.OpenFile()\n self.write(hist,histblk)\n self.CloseFile()\n print(hist+' reflections written to file '+self.fullpath)\n \n def write(self,hist,histblk):\n self.Write('\"Histogram\"')\n self.Write('\"'+hist+'\"')\n self.Write('')\n # table of phases\n self.Write('\"Phase name\",\"phase #\"')\n for i,phasenam in enumerate(sorted(histblk['Reflection Lists'])):\n self.Write('\"'+str(phasenam)+'\",'+str(i))\n self.Write('')\n # note addition of a phase # flag at end (i)\n for i,phasenam in enumerate(sorted(histblk['Reflection Lists'])):\n phasDict = histblk['Reflection Lists'][phasenam]\n tname = {'T':'TOF','C':'2-theta','B':'2-theta'}[phasDict['Type'][2]]\n if phasDict.get('Super',False):\n WriteList(self,(\"h\",\"k\",\"l\",\"m\",\"d-sp\",tname,\"F_obs\",\"F_calc\",\"phase\",\"mult\",\"sig\",\"gam\",\"FWHM\",\"Prfo\",\"phase #\"))\n if 'T' in phasDict['Type']:\n fmt = \"{:.0f},{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:.3f},{:.3f},{:.3f},{:.4f},{:d}\"\n else:\n fmt = 
\"{:.0f},{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:.5f},{:.5f},{:.5f},{:.4f},{:d}\"\n refList = phasDict['RefList']\n for refItem in refList:\n if 'T' in phasDict['Type']:\n h,k,l,m,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,x,x,x,Prfo = refItem[:17]\n FWHM = G2pwd.getgamFW(gam,sig)\n self.Write(fmt.format(h,k,l,m,dsp,pos,Fobs,Fcalc,phase,mult,sig,gam,FWHM,i))\n elif 'C' in phasDict['Type']: #convert to deg \n h,k,l,m,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,Prfo = refItem[:14]\n s = np.sqrt(max(sig,0.0001))/100. #var -> sig in deg\n g = gam/100. #-> deg\n FWHM = G2pwd.getgamFW(g,s)\n self.Write(fmt.format(h,k,l,m,dsp,pos,Fobs,Fcalc,phase,mult,s,g,FWHM,i))\n elif 'B' in phasDict['Type']: #convert to deg \n h,k,l,m,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,x,x,x,Prfo = refItem[:17]\n s = np.sqrt(max(sig,0.0001))/100. #var -> sig in deg\n g = gam/100. #-> deg\n FWHM = G2pwd.getgamFW(g,s)\n self.Write(fmt.format(h,k,l,m,dsp,pos,Fobs,Fcalc,phase,mult,s,g,FWHM,i))\n else:\n WriteList(self,(\"h\",\"k\",\"l\",\"d-sp\",tname,\"F_obs\",\"F_calc\",\"phase\",\"mult\",\"sig\",\"gam\",\"FWHM\",\"Prfo\",\"phase #\"))\n if 'T' in phasDict['Type']:\n fmt = \"{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:.3f},{:.3f},{:.3f},{:.4f},{:d}\"\n else:\n fmt = \"{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:.5f},{:.5f},{:.5f},{:.4f},{:d}\"\n refList = phasDict['RefList']\n for refItem in refList:\n if 'T' in phasDict['Type']:\n h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,x,x,x,Prfo = refItem[:16]\n FWHM = G2pwd.getgamFW(gam,sig)\n self.Write(fmt.format(h,k,l,dsp,pos,Fobs,Fcalc,phase,mult,sig,gam,FWHM,Prfo,i))\n elif 'C' in phasDict['Type']: #convert to deg \n h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,Prfo = refItem[:13]\n g = gam/100.\n s = np.sqrt(max(sig,0.0001))/100.\n FWHM = G2pwd.getgamFW(g,s)\n self.Write(fmt.format(h,k,l,dsp,pos,Fobs,Fcalc,phase,mult,s,g,FWHM,Prfo,i))\n elif 'B' in phasDict['Type']: #convert to deg \n h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr,x,x,x,Prfo = refItem[:16]\n g = gam/100.\n s = np.sqrt(max(sig,0.0001))/100.\n FWHM = G2pwd.getgamFW(g,s)\n self.Write(fmt.format(h,k,l,dsp,pos,Fobs,Fcalc,phase,mult,s,g,FWHM,Prfo,i))\n \nclass ExportSASDCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file for a small angle data set\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'CSV file',\n extension='.csv',\n longFormatName = 'Export small angle data as comma-separated (csv) file'\n )\n self.exporttype = ['sasd']\n #self.multiple = False # only allow one histogram to be selected\n self.multiple = True\n\n def Writer(self,TreeName,filename=None):\n self.OpenFile(filename)\n histblk = self.Histograms[TreeName]\n if len(self.Histograms[TreeName]['Models']['Size']['Distribution']):\n self.Write('\"Size Distribution\"')\n Distr = np.array(self.Histograms[TreeName]['Models']['Size']['Distribution'])\n WriteList(self,(\"bin_pos\",\"bin_width\",\"bin_value\"))\n digitList = 2*((13,3),)+((13,4,'g'),)\n for bindata in Distr.T:\n line = \"\"\n for val,digits in zip(bindata,digitList):\n if line: line += ','\n line += G2py3.FormatValue(val,digits)\n self.Write(line) \n self.Write('\"Small angle data\"')\n Parms = self.Histograms[TreeName]['Instrument Parameters'][0]\n for parm in Parms:\n if parm in ['Type','Source',]:\n line = '\"Instparm: 
%s\",\"%s\"'%(parm,Parms[parm][0])\n elif parm in ['Lam',]:\n line = '\"Instparm: %s\",%10.6f'%(parm,Parms[parm][1])\n else:\n line = '\"Instparm: %s\",%10.2f'%(parm,Parms[parm][1])\n self.Write(line)\n WriteList(self,(\"q\",\"y_obs\",\"y_sig\",\"y_calc\",\"y_bkg\"))\n digitList = 5*((13,5,'g'),)\n for vallist in zip(histblk['Data'][0],\n histblk['Data'][1],\n 1./np.sqrt(histblk['Data'][2]),\n histblk['Data'][3],\n histblk['Data'][4],\n ):\n line = \"\"\n for val,digits in zip(vallist,digitList):\n if line: line += ','\n line += '%.6g'%val\n# line += G2py3.FormatValue(val,digits)\n self.Write(line)\n self.CloseFile()\n \n def Exporter(self,event=None):\n '''Export a set of small angle data as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect( # set export parameters\n AskFile='single' # get a file name/directory to save in\n ): return\n filenamelist = []\n for hist in self.histnam:\n if len(self.histnam) == 1:\n name = self.filename\n else: # multiple files: create a unique name from the histogram\n name = self.MakePWDRfilename(hist)\n fileroot = os.path.splitext(G2obj.MakeUniqueLabel(name,filenamelist))[0]\n # create the file\n self.filename = os.path.join(self.dirname,fileroot + self.extension)\n self.Writer(hist)\n print('Histogram '+hist+' written to file '+self.fullpath)\n\nclass ExportREFDCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file for a reflectometry data set\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'CSV file',\n extension='.csv',\n longFormatName = 'Export reflectometry data as comma-separated (csv) file'\n )\n self.exporttype = ['refd']\n #self.multiple = False # only allow one histogram to be selected\n self.multiple = True\n\n def Writer(self,TreeName,filename=None):\n self.OpenFile(filename)\n histblk = self.Histograms[TreeName]\n self.Write('\"Reflectometry data\"')\n Parms = self.Histograms[TreeName]['Instrument Parameters'][0]\n for parm in Parms:\n if parm in ['Type','Source',]:\n line = '\"Instparm: %s\",\"%s\"'%(parm,Parms[parm][0])\n elif parm in ['Lam',]:\n line = '\"Instparm: %s\",%10.6f'%(parm,Parms[parm][1])\n else:\n line = '\"Instparm: %s\",%10.2f'%(parm,Parms[parm][1])\n self.Write(line)\n WriteList(self,(\"q\",\"y_obs\",\"y_sig\",\"y_calc\",\"y_bkg\"))\n digitList = 5*((13,5,'g'),)\n for vallist in zip(histblk['Data'][0],\n histblk['Data'][1],\n 1./np.sqrt(histblk['Data'][2]),\n histblk['Data'][3],\n histblk['Data'][4],\n ):\n line = \"\"\n for val,digits in zip(vallist,digitList):\n if line: line += ','\n line += '%.6g'%val\n# line += G2py3.FormatValue(val,digits)\n self.Write(line)\n self.CloseFile()\n \n def Exporter(self,event=None):\n '''Export a set of reflectometry data as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect( # set export parameters\n AskFile='single' # get a file name/directory to save in\n ): return\n filenamelist = []\n for hist in self.histnam:\n if len(self.histnam) == 1:\n name = self.filename\n else: # multiple files: create a unique name from the histogram\n name = self.MakePWDRfilename(hist)\n fileroot = os.path.splitext(G2obj.MakeUniqueLabel(name,filenamelist))[0]\n # create the file\n self.filename = 
os.path.join(self.dirname,fileroot + self.extension)\n self.Writer(hist)\n print('Histogram '+hist+' written to file '+self.fullpath)\n\nclass ExportSingleCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file with single crystal reflection data\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'CSV file',\n extension='.csv',\n longFormatName = 'Export reflection list as a comma-separated (csv) file'\n )\n self.exporttype = ['single']\n self.multiple = False # only allow one histogram to be selected\n\n def Exporter(self,event=None):\n '''Export a set of single crystal data as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect(): return # set export parameters, get file name\n self.OpenFile()\n hist = self.histnam[0] # there should only be one histogram, in any case take the 1st\n histblk = self.Histograms[hist]\n for i,phasenam in enumerate(sorted(histblk['Reflection Lists'])):\n phasDict = histblk['Reflection Lists'][phasenam]\n tname = {'T':'TOF','C':'2-theta'}[phasDict['Type'][2]]\n if phasDict.get('Super',False):\n WriteList(self,(\"h\",\"k\",\"l\",\"m\",'d-sp',tname,\"F_obs\",\"F_calc\",\"phase\",\"mult\",\"phase #\"))\n fmt = \"{:.0f},{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:d}\"\n refList = phasDict['RefList']\n for refItem in refList:\n h,k,l,m,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr = refItem[:13]\n self.Write(fmt.format(h,k,l,m,dsp,pos,Fobs,Fcalc,phase,mult,i)) \n else:\n WriteList(self,(\"h\",\"k\",\"l\",'d-sp',tname,\"F_obs\",\"F_calc\",\"phase\",\"mult\",\"phase #\"))\n fmt = \"{:.0f},{:.0f},{:.0f},{:.5f},{:.3f},{:.3f},{:.3f},{:.2f},{:.0f},{:d}\"\n refList = phasDict['RefList']\n for refItem in refList:\n h,k,l,mult,dsp,pos,sig,gam,Fobs,Fcalc,phase,Icorr = refItem[:12]\n self.Write(fmt.format(h,k,l,dsp,pos,Fobs,Fcalc,phase,mult,i))\n self.CloseFile()\n print(hist+' written to file '+self.fullname) \n\nclass ExportStrainCSV(G2IO.ExportBaseclass):\n '''Used to create a csv file with single crystal reflection data\n\n :param wx.Frame G2frame: reference to main GSAS-II frame\n '''\n def __init__(self,G2frame):\n super(self.__class__,self).__init__( # fancy way to say <parentclass>.__init__\n G2frame=G2frame,\n formatName = 'Strain CSV file',\n extension='.csv',\n longFormatName = 'Export strain results as a comma-separated (csv) file'\n )\n self.exporttype = ['image']\n self.multiple = False # only allow one histogram to be selected\n\n def Exporter(self,event=None):\n '''Export a set of single crystal data as a csv file\n '''\n # the export process starts here\n self.InitExport(event)\n # load all of the tree into a set of dicts\n self.loadTree()\n if self.ExportSelect(): return # set export parameters, get file name\n self.OpenFile()\n hist = self.histnam[0] # there should only be one histogram, in any case take the 1st\n histblk = self.Histograms[hist]\n StrSta = histblk['Stress/Strain']\n WriteList(self,(\"Dset\",\"Dcalc\",\"e11\",\"sig(e11)\",\"e12\",\"sig(e12)\",\"e22\",\"sig(e22)\"))\n fmt = 2*\"{:.5f},\"+6*\"{:.0f},\"\n fmt1 = \"{:.5f}\"\n fmt2 = \"{:.2f},{:.5f},{:.5f}\"\n for item in StrSta['d-zero']:\n Emat = item['Emat']\n Esig = item['Esig']\n self.Write(fmt.format(item['Dset'],item['Dcalc'],Emat[0],Esig[0],Emat[1],Esig[1],Emat[2],Esig[2]))\n for item in 
StrSta['d-zero']:\n WriteList(self,(\"Azm\",\"dobs\",\"dcalc\",\"Dset=\"+fmt1.format(item['Dset'])))\n ring = np.vstack((item['ImtaObs'],item['ImtaCalc']))\n for dat in ring.T:\n self.Write(fmt2.format(dat[1],dat[0],dat[2])) \n self.CloseFile()\n print(hist+' written to file '+self.fullpath)\n"
]
| [
[
"numpy.array",
"numpy.sqrt",
"numpy.vstack"
]
]
|
crwhite14/NASLib | [
"59927f2a9b80de1b1b346e71795fbbcef3b936b8"
]
| [
"naslib/optimizers/discrete/ls/optimizer.py"
]
| [
"import collections\nimport logging\nimport torch\nimport copy\nimport random\nimport numpy as np\n\nfrom naslib.optimizers.core.metaclasses import MetaOptimizer\n\nfrom naslib.search_spaces.core.query_metrics import Metric\nfrom naslib.search_spaces.nasbench201.graph import NasBench201SearchSpace\nfrom naslib.utils.utils import AttrDict, count_parameters_in_MB\nfrom naslib.utils.logging import log_every_n_seconds\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalSearch(MetaOptimizer):\n # training the models is not implemented\n using_step_function = False\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.epochs = config.search.epochs\n\n self.performance_metric = Metric.VAL_ACCURACY\n self.dataset = config.dataset\n\n self.num_init = config.search.num_init\n self.nbhd = []\n self.chosen = None\n self.best_arch = None\n\n self.history = torch.nn.ModuleList()\n\n def adapt_search_space(self, search_space, scope=None, dataset_api=None):\n assert (\n search_space.QUERYABLE\n ), \"Local search is currently only implemented for benchmarks.\"\n self.search_space = search_space.clone()\n self.scope = scope if scope else search_space.OPTIMIZER_SCOPE\n self.dataset_api = dataset_api\n\n def new_epoch(self, epoch):\n\n if epoch < self.num_init:\n # randomly sample initial architectures\n model = (\n torch.nn.Module()\n ) # hacky way to get arch and accuracy checkpointable\n model.arch = self.search_space.clone()\n model.arch.sample_random_architecture(dataset_api=self.dataset_api)\n model.accuracy = model.arch.query(\n self.performance_metric, self.dataset, dataset_api=self.dataset_api\n )\n\n if not self.best_arch or model.accuracy > self.best_arch.accuracy:\n self.best_arch = model\n self._update_history(model)\n\n else:\n if (\n len(self.nbhd) == 0\n and self.chosen\n and self.best_arch.accuracy <= self.chosen.accuracy\n ):\n logger.info(\n \"Reached local minimum. Starting from new random architecture.\"\n )\n\n model = (\n torch.nn.Module()\n ) # hacky way to get arch and accuracy checkpointable\n model.arch = self.search_space.clone()\n model.arch.sample_random_architecture(dataset_api=self.dataset_api)\n model.accuracy = model.arch.query(\n self.performance_metric, self.dataset, dataset_api=self.dataset_api\n )\n\n self.chosen = model\n self.best_arch = model\n self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)\n\n else:\n if len(self.nbhd) == 0:\n logger.info(\n \"Start a new iteration. 
Pick the best architecture and evaluate its neighbors.\"\n )\n self.chosen = self.best_arch\n self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)\n\n model = self.nbhd.pop()\n model.accuracy = model.arch.query(\n self.performance_metric, self.dataset, dataset_api=self.dataset_api\n )\n\n if model.accuracy > self.best_arch.accuracy:\n self.best_arch = model\n logger.info(\"Found new best architecture.\")\n self._update_history(model)\n\n def _update_history(self, child):\n if len(self.history) < 100:\n self.history.append(child)\n else:\n for i, p in enumerate(self.history):\n if child.accuracy > p.accuracy:\n self.history[i] = child\n break\n\n def train_statistics(self):\n best_arch = self.get_final_architecture()\n return (\n best_arch.query(\n Metric.TRAIN_ACCURACY, self.dataset, dataset_api=self.dataset_api\n ),\n best_arch.query(\n Metric.VAL_ACCURACY, self.dataset, dataset_api=self.dataset_api\n ),\n best_arch.query(\n Metric.TEST_ACCURACY, self.dataset, dataset_api=self.dataset_api\n ),\n )\n\n def test_statistics(self):\n best_arch = self.get_final_architecture()\n return best_arch.query(Metric.RAW, self.dataset, dataset_api=self.dataset_api)\n\n def get_final_architecture(self):\n return max(self.history, key=lambda x: x.accuracy).arch\n\n def get_op_optimizer(self):\n raise NotImplementedError()\n\n def get_checkpointables(self):\n return {\"model\": self.history}\n\n def get_model_size(self):\n return count_parameters_in_MB(self.history)\n"
]
| [
[
"torch.nn.Module",
"torch.nn.ModuleList"
]
]
|
RichardoMrMu/repvgg-xgaze | [
"c32e6bb3b9a53848cd407ce54f8b685390f293d9"
]
| [
"models/mixer.py"
]
| [
"from torch import nn\nfrom functools import partial\nfrom einops.layers.torch import Rearrange, Reduce\n\nclass PreNormResidual(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.fn = fn\n self.norm = nn.LayerNorm(dim)\n\n def forward(self, x):\n return self.fn(self.norm(x)) + x\n\ndef FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):\n return nn.Sequential(\n dense(dim, dim * expansion_factor),\n nn.GELU(),\n nn.Dropout(dropout),\n dense(dim * expansion_factor, dim),\n nn.Dropout(dropout)\n )\n\ndef MLPMixer(*, image_size, patch_size, dim, depth, num_classes, expansion_factor = 4, dropout = 0.):\n assert (image_size % patch_size) == 0, 'image must be divisible by patch size'\n num_patches = (image_size // patch_size) ** 2\n chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear\n\n return nn.Sequential(\n Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),\n nn.Linear((patch_size ** 2) * 3, dim),\n *[nn.Sequential(\n PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),\n PreNormResidual(dim, FeedForward(dim, expansion_factor, dropout, chan_last))\n ) for _ in range(depth)],\n nn.LayerNorm(dim),\n Reduce('b n c -> b c', 'mean'),\n nn.Linear(dim, num_classes)\n )"
]
| [
[
"torch.nn.Linear",
"torch.nn.GELU",
"torch.nn.LayerNorm",
"torch.nn.Dropout"
]
]
|
jvamvas/sacrerouge | [
"d3938d15d747707cb92067b1f711b775ddaf211e"
]
| [
"sacrerouge/commands/stat_sig_test.py"
]
| [
"import argparse\nimport functools\nimport json\nimport logging\nimport numpy as np\nimport os\nimport random\nfrom overrides import overrides\nfrom scipy.stats import kendalltau, pearsonr, spearmanr\nfrom typing import Dict, List, Tuple, Union\n\nfrom sacrerouge.commands import RootSubcommand\nfrom sacrerouge.commands.correlate import load_metrics, filter_metrics, merge_metrics\nfrom sacrerouge.common.logging import prepare_global_logging\nfrom sacrerouge.data import Metrics\nfrom sacrerouge.stats import convert_to_matrices, corr_diff_test, global_corr, summary_level_corr, system_level_corr\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_hypotheses(two_tailed: bool,\n dependent_metric: str,\n metric_A: str,\n metric_B: str,) -> Tuple[str, str]:\n if two_tailed:\n return f'r({metric_A}, {dependent_metric}) == r({metric_B}, {dependent_metric})', \\\n f'r({metric_A}, {dependent_metric}) != r({metric_B}, {dependent_metric})'\n else:\n return f'r({metric_A}, {dependent_metric}) <= r({metric_B}, {dependent_metric})', \\\n f'r({metric_A}, {dependent_metric}) > r({metric_B}, {dependent_metric})'\n\n\ndef _run_test(corr_func,\n X: np.ndarray, Y: np.ndarray, Z: np.ndarray,\n test_method: str,\n alpha: float,\n two_tailed: bool) -> Dict:\n pearson = functools.partial(corr_func, pearsonr)\n spearman = functools.partial(corr_func, spearmanr)\n kendall = functools.partial(corr_func, kendalltau)\n\n r_pvalue, r_statistic = corr_diff_test(pearson, X, Y, Z, test_method, two_tailed, kwargs={\"return_test_statistic\": True})\n rho_pvalue, rho_statistic = corr_diff_test(spearman, X, Y, Z, test_method, two_tailed, kwargs={\"return_test_statistic\": True})\n tau_pvalue, tau_statistic = corr_diff_test(kendall, X, Y, Z, test_method, two_tailed, kwargs={\"return_test_statistic\": True})\n\n # For some reason, without casting `pvalue <= alpha` to a bool, the result\n # would be type `bool_` which was not json serializable\n return {\n 'pearson': {\n 'pvalue': r_pvalue,\n 'is_significant': bool(r_pvalue <= alpha),\n \"test_statistic\": r_statistic,\n },\n 'spearman': {\n 'pvalue': rho_pvalue,\n 'is_significant': bool(rho_pvalue <= alpha),\n \"test_statistic\": rho_statistic,\n },\n 'kendall': {\n 'pvalue': tau_pvalue,\n 'is_significant': bool(tau_pvalue <= alpha),\n \"test_statistic\": tau_statistic,\n },\n }\n\n\ndef run_hypothesis_tests(metrics_jsonl_files_or_metrics_list: Union[str, List[str], List[Metrics]],\n dependent_metric: str,\n metric_A: str,\n metric_B: str,\n summarizer_type: str,\n test_method: str = 'permutation-both',\n alpha: float = 0.05,\n two_tailed: bool = True,\n skip_summary_level: bool = False,\n skip_system_level: bool = False,\n skip_global: bool = False) -> Dict:\n if isinstance(metrics_jsonl_files_or_metrics_list, str):\n # A single file\n metrics_list = load_metrics([metrics_jsonl_files_or_metrics_list])\n elif isinstance(metrics_jsonl_files_or_metrics_list, list) and all(\n isinstance(item, str) for item in metrics_jsonl_files_or_metrics_list):\n # A list of files\n metrics_list = load_metrics(metrics_jsonl_files_or_metrics_list)\n else:\n # A list of metrics\n assert isinstance(metrics_jsonl_files_or_metrics_list, list) and all(\n isinstance(item, Metrics) for item in metrics_jsonl_files_or_metrics_list)\n metrics_list = metrics_jsonl_files_or_metrics_list\n\n # Merge duplicate metrics objects into one\n metrics_list = merge_metrics(metrics_list)\n\n for metrics in metrics_list:\n metrics.flatten_keys()\n\n metrics_list = filter_metrics(metrics_list, summarizer_type, 
dependent_metric, metric_A, metric_B)\n for metrics in metrics_list:\n metrics.select_metrics([dependent_metric, metric_A, metric_B])\n metrics.average_values()\n\n # Follow the math in the paper: the dependent metric is Z\n X, Y, Z = convert_to_matrices(metrics_list, metric_A, metric_B, dependent_metric)\n\n H0, H1 = _get_hypotheses(two_tailed, dependent_metric, metric_A, metric_B)\n results = {\n 'dependent_metric': dependent_metric,\n 'metric_A': metric_A,\n 'metric_B': metric_B,\n 'summarizer_type': summarizer_type,\n 'test_method': test_method,\n 'alpha': alpha,\n 'two_tailed': two_tailed,\n 'H0': H0,\n 'H1': H1\n }\n if not skip_summary_level:\n results['summary_level'] = _run_test(summary_level_corr, X, Y, Z, test_method, alpha, two_tailed)\n\n if not skip_system_level:\n results['system_level'] = _run_test(system_level_corr, X, Y, Z, test_method, alpha, two_tailed)\n\n if not skip_global:\n results['global'] = _run_test(global_corr, X, Y, Z, test_method, alpha, two_tailed)\n\n return results\n\n\[email protected]('stat-sig-test')\nclass StatisticalSignificanceTestSubcommand(RootSubcommand):\n @overrides\n def add_subparser(self, parser: argparse._SubParsersAction):\n description = 'Run hypothesis testing on the difference between the correlation between metric A and the ' \\\n 'dependent metric versus metric B and the dependent metric'\n self.parser = parser.add_parser('stat-sig-test', description=description, help=description)\n self.parser.add_argument(\n '--metrics-jsonl-files',\n nargs='+',\n help='The jsonl files with the metric values. If the values are split across multiple files, they can all '\n 'be passed as arguments.',\n required=True\n )\n self.parser.add_argument(\n '--dependent-metric',\n type=str,\n help='The flattened name of the dependent metric against which A and B will be correlated',\n required=True\n )\n self.parser.add_argument(\n '--metric-A',\n type=str,\n help='The flattened name of metric A',\n required=True\n )\n self.parser.add_argument(\n '--metric-B',\n type=str,\n help='The flattened name of metric B',\n required=True\n )\n self.parser.add_argument(\n '--summarizer-type',\n choices=['all', 'reference', 'peer'],\n help='The type of summarizer which should be included in the correlation calculation',\n required=True\n )\n self.parser.add_argument(\n '--hypothesis-test',\n choices=['bootstrap-system', 'bootstrap-input', 'bootstrap-both', 'permutation-both',\n 'permutation-input', 'permutation-system', 'williams'],\n default='permutation-both',\n help='The hypothesis test to use'\n )\n self.parser.add_argument(\n '--confidence',\n type=float,\n default=95,\n help='The confidence level of the hypothesis test'\n )\n self.parser.add_argument(\n '--num-tails',\n type=int,\n choices=[1, 2],\n default=1,\n help='The number of tails to use in the hypothesis test'\n )\n self.parser.add_argument(\n '--random-seed',\n type=int,\n help='The random seed to use for numpy. 
Python random will be this number plus one'\n )\n self.parser.add_argument(\n '--skip-summary-level',\n action='store_true',\n help='Indicates the summary-level correlations should not be tested'\n )\n self.parser.add_argument(\n '--skip-system-level',\n action='store_true',\n help='Indicates the system-level correlations should not be tested'\n )\n self.parser.add_argument(\n '--skip-global',\n action='store_true',\n help='Indicates the global correlations should not be tested'\n )\n self.parser.add_argument(\n '--output-file',\n type=str,\n help='The json output file which will contain the test results'\n )\n self.parser.add_argument(\n '--log-file',\n type=str,\n help='The file where the log should be written'\n )\n self.parser.add_argument(\n '--silent',\n action='store_true',\n help='Controls whether the log should be written to stdout'\n )\n self.parser.set_defaults(func=self.run)\n\n def run(self, args):\n prepare_global_logging(file_path=args.log_file, silent=args.silent)\n\n if args.random_seed is not None:\n np.random.seed(args.random_seed)\n random.seed(args.random_seed + 1)\n\n two_tailed = args.num_tails == 2\n alpha = 1.0 - args.confidence / 100\n results = run_hypothesis_tests(args.metrics_jsonl_files,\n args.dependent_metric,\n args.metric_A,\n args.metric_B,\n args.summarizer_type,\n test_method=args.hypothesis_test,\n alpha=alpha,\n two_tailed=two_tailed,\n skip_summary_level=args.skip_summary_level,\n skip_system_level=args.skip_system_level,\n skip_global=args.skip_global)\n\n if args.output_file:\n dirname = os.path.dirname(args.output_file)\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n with open(args.output_file, 'w') as out:\n out.write(json.dumps(results, indent=2))\n\n if not args.silent:\n logger.info(json.dumps(results, indent=2))\n"
]
| [
[
"numpy.random.seed"
]
]
|
shivamtundele/dirichlet_python | [
"24d90a804b68231724d171e194e7baf91793ea94"
]
| [
"dirichletcal/calib/multinomial.py"
]
| [
"from __future__ import division\n\nimport logging\n\nimport jax.numpy as np\nimport jax\nimport numpy as raw_np\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.preprocessing import label_binarize\n\nimport scipy\nimport scipy.optimize\nimport scipy.linalg\n\nfrom ..utils import clip_jax\n\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\n\n\nclass MultinomialRegression(BaseEstimator, RegressorMixin):\n def __init__(self, weights_0=None, method='Full', initializer='identity',\n reg_format=None, reg_lambda=0.0, reg_mu=None, reg_norm=False,\n ref_row=True):\n if method not in ['Full', 'Diag', 'FixDiag']:\n raise(ValueError('method {} not avaliable'.format(method)))\n\n self.weights_0 = weights_0\n self.method = method\n self.initializer = initializer\n self.reg_format = reg_format\n self.reg_lambda = reg_lambda\n self.reg_mu = reg_mu # If number, then ODIR is applied\n self.reg_norm = reg_norm\n self.ref_row = ref_row\n\n def __setup(self):\n self.classes = None\n self.weights_ = self.weights_0\n self.weights_0_ = self.weights_0\n\n @property\n def coef_(self):\n return self.weights_[:, :-1]\n\n @property\n def intercept_(self):\n return self.weights_[:, -1]\n\n def predict_proba(self, S):\n\n S_ = np.hstack((S, np.ones((len(S), 1))))\n\n return np.asarray(_calculate_outputs(self.weights_, S_))\n\n # FIXME Should we change predict for the argmax?\n def predict(self, S):\n\n return np.asarray(self.predict_proba(S))\n\n\n def fit(self, X, y, *args, **kwargs):\n\n self.__setup()\n\n X_ = np.hstack((X, np.ones((len(X), 1))))\n\n self.classes = raw_np.unique(y)\n\n k = len(self.classes)\n\n if self.reg_norm:\n if self.reg_mu is None:\n self.reg_lambda = self.reg_lambda / (k * (k + 1))\n else:\n self.reg_lambda = self.reg_lambda / (k * (k - 1))\n self.reg_mu = self.reg_mu / k\n\n target = label_binarize(y, classes=self.classes)\n\n if k == 2:\n target = np.hstack([1-target, target])\n\n n, m = X_.shape\n\n XXT = (X_.repeat(m, axis=1) * np.hstack([X_]*m)).reshape((n, m, m))\n\n logging.debug(self.method)\n\n self.weights_0_ = self._get_initial_weights(self.initializer)\n\n if k <= 36:\n weights = _newton_update(self.weights_0_, X_, XXT, target, k,\n self.method, reg_lambda=self.reg_lambda,\n reg_mu=self.reg_mu, ref_row=self.ref_row,\n initializer=self.initializer,\n reg_format=self.reg_format)\n else:\n res = scipy.optimize.fmin_l_bfgs_b(func=_objective, fprime=_gradient,\n x0=self.weights_0_,\n args=(X_, XXT, target, k,\n self.method,\n self.reg_lambda,\n self.reg_mu, self.ref_row,\n self.initializer,\n self.reg_format),\n maxls=128,\n factr=1.0)\n weights = res[0]\n\n self.weights_ = _get_weights(weights, k, self.ref_row, self.method)\n\n return self\n\n def _get_initial_weights(self, ref_row, initializer='identity'):\n ''' Returns an array containing only the weights of the full weight\n matrix.\n\n '''\n\n if initializer not in ['identity', None]:\n raise ValueError\n\n k = len(self.classes)\n\n if self.weights_0_ is None:\n if initializer == 'identity':\n weights_0 = _get_identity_weights(k, ref_row, self.method)\n else:\n if self.method == 'Full':\n weights_0 = np.zeros(k * (k + 1))\n elif self.method == 'Diag':\n weights_0 = np.zeros(2*k)\n elif self.method == 'FixDiag':\n weights_0 = np.zeros(1)\n else:\n weights_0 = self.weights_0_\n\n return weights_0\n\n\ndef _objective(params, *args):\n (X, _, y, k, method, reg_lambda, reg_mu, ref_row, _, reg_format) = args\n weights = _get_weights(params, k, ref_row, method)\n outputs = 
clip_jax(_calculate_outputs(weights, X))\n loss = np.mean(-np.log(np.sum(y * outputs, axis=1)))\n\n if reg_mu is None:\n if reg_format == 'identity':\n reg = np.hstack([np.eye(k), np.zeros((k, 1))])\n else:\n reg = np.zeros((k, k+1))\n loss = loss + reg_lambda * np.sum((weights - reg)**2)\n else:\n weights_hat = weights - np.hstack([weights[:, :-1] * np.eye(k),\n np.zeros((k, 1))])\n loss = loss + reg_lambda * np.sum(weights_hat[:, :-1] ** 2) + \\\n reg_mu * np.sum(weights_hat[:, -1] ** 2)\n\n return loss\n\n\n_gradient = jax.grad(_objective, argnums=0)\n\n\n_hessian = jax.hessian(_objective, argnums=0)\n\n\ndef _get_weights(params, k, ref_row, method):\n ''' Reshapes the given params (weights) into the full matrix including 0\n '''\n\n if method in ['Full', None]:\n raw_weights = params.reshape(-1, k+1)\n # weights = np.zeros([k, k+1])\n # weights[:-1, :] = params.reshape(-1, k + 1)\n\n elif method == 'Diag':\n raw_weights = np.hstack([np.diag(params[:k]),\n params[k:].reshape(-1, 1)])\n # weights[:, :-1][np.diag_indices(k)] = params[:]\n\n elif method == 'FixDiag':\n raw_weights = np.hstack([np.eye(k) * params[0], np.zeros((k, 1))])\n # weights[np.dgag_indices(k - 1)] = params[0]\n # weights[np.diag_indices(k)] = params[0]\n else:\n raise(ValueError(\"Unknown calibration method {}\".format(method)))\n\n if ref_row:\n weights = raw_weights - np.repeat(\n raw_weights[-1, :].reshape(1, -1), k, axis=0)\n else:\n weights = raw_weights\n\n return weights\n\n\ndef _get_identity_weights(n_classes, ref_row, method):\n\n raw_weights = None\n\n if (method is None) or (method == 'Full'):\n raw_weights = np.zeros((n_classes, n_classes + 1)) + \\\n np.hstack([np.eye(n_classes), np.zeros((n_classes, 1))])\n raw_weights = raw_weights.ravel()\n\n elif method == 'Diag':\n raw_weights = np.hstack([np.ones(n_classes), np.zeros(n_classes)])\n\n elif method == 'FixDiag':\n raw_weights = np.ones(1)\n\n return raw_weights.ravel()\n\n\ndef _calculate_outputs(weights, X):\n mul = np.dot(X, weights.transpose())\n return _softmax(mul)\n\n\ndef _softmax(X):\n \"\"\"Compute the softmax of matrix X in a numerically stable way.\"\"\"\n shiftx = X - np.max(X, axis=1).reshape(-1, 1)\n exps = np.exp(shiftx)\n return exps / np.sum(exps, axis=1).reshape(-1, 1)\n\n\ndef _newton_update(weights_0, X, XX_T, target, k, method_, maxiter=int(1024),\n ftol=1e-12, gtol=1e-8, reg_lambda=0.0, reg_mu=None,\n ref_row=True, initializer=None, reg_format=None):\n\n L_list = [raw_np.float(_objective(weights_0, X, XX_T, target, k, method_,\n reg_lambda, reg_mu, ref_row, initializer,\n reg_format))]\n\n weights = weights_0.copy()\n\n # TODO move this to the initialization\n if method_ is None:\n weights = np.zeros_like(weights)\n\n for i in range(0, maxiter):\n\n gradient = _gradient(weights, X, XX_T, target, k, method_, reg_lambda,\n reg_mu, ref_row, initializer, reg_format)\n\n if np.abs(gradient).sum() < gtol:\n break\n\n # FIXME hessian is ocasionally NaN\n hessian = _hessian(weights, X, XX_T, target, k, method_, reg_lambda,\n reg_mu, ref_row, initializer, reg_format)\n\n if method_ == 'FixDiag':\n updates = gradient / hessian\n else:\n try:\n inverse = scipy.linalg.pinv2(hessian)\n updates = np.matmul(inverse, gradient)\n except (raw_np.linalg.LinAlgError, ValueError) as err:\n logging.error(err)\n updates = gradient\n\n for step_size in np.hstack((np.linspace(1, 0.1, 10),\n np.logspace(-2, -32, 31))):\n\n tmp_w = weights - (updates * step_size).ravel()\n\n if np.any(np.isnan(tmp_w)):\n logging.debug(\"{}: There are NaNs in 
tmp_w\".format(method_))\n\n L = _objective(tmp_w, X, XX_T, target, k, method_, reg_lambda,\n reg_mu, ref_row, initializer, reg_format)\n\n if (L - L_list[-1]) < 0:\n break\n\n L_list.append(raw_np.float(L))\n\n logging.debug(\"{}: after {} iterations log-loss = {:.7e}, sum_grad = {:.7e}\".format(\n method_, i, L, np.abs(gradient).sum()))\n\n if np.isnan(L):\n logging.error(\"{}: log-loss is NaN\".format(method_))\n break\n\n if i >= 5:\n if (raw_np.float(raw_np.min(raw_np.diff(L_list[-5:]))) > -ftol) & \\\n (raw_np.float(raw_np.sum(raw_np.diff(L_list[-5:])) > 0) == 0):\n weights = tmp_w.copy()\n logging.debug('{}: Terminate as there is not enough changes on loss.'.format(\n method_))\n break\n\n if (L_list[-1] - L_list[-2]) > 0:\n logging.debug('{}: Terminate as the loss increased {}.'.format(\n method_, np.diff(L_list[-2:])))\n break\n else:\n weights = tmp_w.copy()\n\n L = _objective(weights, X, XX_T, target, k, method_,\n reg_lambda, reg_mu, ref_row, initializer, reg_format)\n\n logging.debug(\"{}: after {} iterations final log-loss = {:.7e}, sum_grad = {:.7e}\".format(\n method_, i, L, np.abs(gradient).sum()))\n\n return weights\n"
]
| [
[
"scipy.optimize.fmin_l_bfgs_b",
"numpy.float",
"numpy.diff",
"scipy.linalg.pinv2",
"numpy.unique",
"sklearn.preprocessing.label_binarize"
]
]
|
erdogant/treeplot | [
"2d9ad3096cd7bec99df1b3df165488310bbb1851"
]
| [
"tests/test_treeplot.py"
]
| [
"import treeplot\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\n\n\ndef test_randomforest():\n X,y = treeplot.import_example()\n model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0).fit(X, y)\n # TEST 1: check output is unchanged\n ax = treeplot.randomforest(model, export='png')\n assert 'matplotlib' not in str(ax)\n\n\ndef test_xgboost():\n X,y = treeplot.import_example()\n model = XGBClassifier(n_estimators=100, max_depth=2, random_state=0).fit(X, y)\n # TEST 1: check output is unchanged\n ax = treeplot.xgboost(model)\n assert 'matplotlib' not in str(ax)\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier"
]
]
|
davidho95/tfmonopoles | [
"e91e95e786d2812763b71266c0494637f2db67f0"
]
| [
"monopoleInstanton/expandInstanton.py"
]
| [
"\"\"\"\nTakes an instanton configuration as input and returns the same solution on a\nfield twice the size in every direction. There is no change to the lattice\nspacing; the lattice is simply extended in every direction. The number of flux\nquanta in the input solution must be specified as an argument.\n\"\"\"\n\nimport tensorflow as tf\nfrom tfmonopoles import FieldTools\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Expand an instanton solution\")\nparser.add_argument(\"--outputPath\", \"-o\", default=\"\", type=str)\nparser.add_argument(\"--inputPath\", \"-i\", default=\"\", type=str)\nparser.add_argument(\"--fluxQuanta\", \"-B\", default=0, type=int)\n\nargs = parser.parse_args()\n\n# Load data from input path\ninputPath = args.inputPath\ninputR = tf.constant(np.load(inputPath + \"/R.npy\", allow_pickle=True))\ninputY = tf.constant(np.load(inputPath + \"/Y.npy\", allow_pickle=True))\ninputZ = tf.constant(np.load(inputPath + \"/Z.npy\", allow_pickle=True))\ninputScalarField = np.load(inputPath + \"/scalarField.npy\", allow_pickle=True)\ninputGaugeField = np.load(inputPath + \"/gaugeField.npy\", allow_pickle=True)\ninputParams = np.load(inputPath + \"/params.npy\", allow_pickle=True).item()\ninputLatShape = inputParams[\"latShape\"]\n\nyzPaddings = [[0,0], [1,1], [1,1], [0,0], [0,0]]\n\noutputScalarField = inputScalarField\noutputGaugeField = inputGaugeField\n\nB = 10\nsmallMagneticField = FieldTools.constantMagneticField(\n inputR, inputY, inputZ, 0, -B\n )\n# Subtract the original field so the padding works; this will be added back\noutputGaugeField = FieldTools.linearSuperpose(\n outputGaugeField, smallMagneticField\n )\n\nfor ii in range(inputLatShape[1] // 2):\n outputScalarField = tf.pad(outputScalarField, yzPaddings, \"symmetric\")\n outputGaugeFieldR = tf.pad(\n outputGaugeField[:,:,:,0,:,:], yzPaddings, \"symmetric\"\n )\n outputGaugeFieldY = tf.pad(\n outputGaugeField[:,:,:,1,:,:], yzPaddings, \"symmetric\"\n )\n outputGaugeFieldZ = tf.pad(\n outputGaugeField[:,:,:,2,:,:], yzPaddings, \"symmetric\"\n )\n outputGaugeField = tf.stack(\n [outputGaugeFieldR, outputGaugeFieldY, outputGaugeFieldZ], -3\n )\n\nrPaddings = [[0,1], [0,0], [0,0], [0,0], [0,0]]\n\nfor ii in range(inputLatShape[0]):\n outputScalarField = tf.pad(outputScalarField, rPaddings, \"symmetric\")\n outputGaugeFieldR = tf.pad(outputGaugeField[:,:,:,0,:,:], rPaddings, \"symmetric\")\n outputGaugeFieldY = tf.pad(outputGaugeField[:,:,:,1,:,:], rPaddings, \"symmetric\")\n outputGaugeFieldZ = tf.pad(outputGaugeField[:,:,:,2,:,:], rPaddings, \"symmetric\")\n outputGaugeField = tf.stack([outputGaugeFieldR, outputGaugeFieldY, outputGaugeFieldZ], -3)\n\noutputLatShape = inputLatShape + inputLatShape\n\n# Set up the lattice\nr = tf.cast(\n tf.linspace(\n -1/2, tf.cast(outputLatShape[0], tf.float32) - 1/2, outputLatShape[0]\n ), tf.float64\n )\ny = tf.cast(\n tf.linspace(\n -(outputLatShape[1]-1)/2, (outputLatShape[1]-1)/2, outputLatShape[1]\n ), tf.float64\n )\nz = tf.cast(\n tf.linspace(\n -(outputLatShape[2]-1)/2, (outputLatShape[2]-1)/2, outputLatShape[2]\n ), tf.float64\n )\n\nR,Y,Z = tf.meshgrid(r, y, z, indexing=\"ij\")\n\nprint(tf.shape(R))\nprint(tf.shape(outputGaugeField))\n\noutputMagneticField = FieldTools.constantMagneticField(R, Y, Z, 0, 4*B)\noutputGaugeField = FieldTools.linearSuperpose(\n outputGaugeField, outputMagneticField\n )\n\noutputParams = inputParams\noutputParams[\"latShape\"] = outputLatShape\n\nprint(\"Instanton expanded. 
Orginal size:\")\nprint(tf.shape(inputR).numpy())\nprint(\"New size:\")\nprint(tf.shape(R).numpy())\n\n# Save field and output parameters\noutputPath = args.outputPath\nif outputPath != \"\":\n np.save(outputPath + \"/R\", R.numpy())\n np.save(outputPath + \"/Y\", Y.numpy())\n np.save(outputPath + \"/Z\", Z.numpy())\n np.save(outputPath + \"/scalarField\", outputScalarField.numpy())\n np.save(outputPath + \"/gaugeField\", outputGaugeField.numpy())\n np.save(outputPath + \"/params\", outputParams)"
]
| [
[
"tensorflow.shape",
"tensorflow.linspace",
"numpy.load",
"tensorflow.meshgrid",
"numpy.save",
"tensorflow.stack",
"tensorflow.pad",
"tensorflow.cast"
]
]
|
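The expansion script in the row above grows the lattice by repeatedly applying `tf.pad` with mirror ("symmetric") padding, one ring of sites per iteration, until the extent has doubled; the constant magnetic field is subtracted before padding and a stronger one is superposed afterwards. A minimal, self-contained sketch of the padding mechanism on a toy 2-D array (toy sizes only, no dependence on `tfmonopoles`):

```python
# Toy illustration of the doubling-by-symmetric-padding loop used above.
# A 4x4 field padded symmetrically by one site per axis, twice, becomes 8x8.
import tensorflow as tf

field = tf.reshape(tf.range(16, dtype=tf.float32), (4, 4))  # toy "field"
paddings = [[1, 1], [1, 1]]                                 # one mirrored site per side

out = field
for _ in range(field.shape[0] // 2):        # 4 // 2 = 2 iterations, +2 sites each
    out = tf.pad(out, paddings, "SYMMETRIC")

print(out.shape)  # (8, 8): twice the original extent in each direction
```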
PeizeSun/caffe2_tutorials | [
"28523ff1ff33f18eaf8b04cc4e0f308826e1861a"
]
| [
"py_gen/create_your_own_dataset.py"
]
| [
"#########################################################\n#\n# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #\n# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #\n#\n#########################################################\n\n\n# coding: utf-8\n\n# # How do I create my own dataset?\n# \n# So Caffe2 uses a binary DB format to store the data that we would like to train models on. A Caffe2 DB is a glorified name of a key-value storage where the keys are usually randomized so that the batches are approximately i.i.d. The values are the real stuff here: they contain the serialized strings of the specific data formats that you would like your training algorithm to ingest. So, the stored DB would look (semantically) like this:\n# \n# key1 value1\n# key2 value2\n# key3 value3\n# ...\n# \n# To a DB, it treats the keys and values as strings, but you probably want structured contents. One way to do this is to use a TensorProtos protocol buffer: it essentially wraps Tensors, aka multi-dimensional arrays, together with the tensor data type and shape information. Then, one can use the TensorProtosDBInput operator to load the data into an SGD training fashion.\n# \n# Here, we will show you one example of how to create your own dataset. To this end, we will use the UCI Iris dataset - which was a very popular classical dataset for classifying Iris flowers. It contains 4 real-valued features representing the dimensions of the flower, and classifies things into 3 types of Iris flowers. The dataset can be downloaded [here](https://archive.ics.uci.edu/ml/datasets/Iris).\n\n# In[1]:\n\n\n# First let's import some necessities\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport urllib2 # for downloading the dataset from the web.\nimport numpy as np\nfrom matplotlib import pyplot\nfrom StringIO import StringIO\nfrom caffe2.python import core, utils, workspace\nfrom caffe2.proto import caffe2_pb2\n\n\n# In[2]:\n\n\nf = urllib2.urlopen('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')\nraw_data = f.read()\nprint('Raw data looks like this:')\nprint(raw_data[:100] + '...')\n\n\n# In[3]:\n\n\n# load the features to a feature matrix.\nfeatures = np.loadtxt(StringIO(raw_data), dtype=np.float32, delimiter=',', usecols=(0, 1, 2, 3))\n# load the labels to a feature matrix\nlabel_converter = lambda s : {'Iris-setosa':0, 'Iris-versicolor':1, 'Iris-virginica':2}[s]\nlabels = np.loadtxt(StringIO(raw_data), dtype=np.int, delimiter=',', usecols=(4,), converters={4: label_converter})\n\n\n# Before we do training, one thing that is often beneficial is to separate the dataset into training and testing. In this case, let's randomly shuffle the data, use the first 100 data points to do training, and the remaining 50 to do testing. For more sophisticated approaches, you can use e.g. cross validation to separate your dataset into multiple training and testing splits. 
Read more about cross validation [here](http://scikit-learn.org/stable/modules/cross_validation.html).\n\n# In[4]:\n\n\nrandom_index = np.random.permutation(150)\nfeatures = features[random_index]\nlabels = labels[random_index]\n\ntrain_features = features[:100]\ntrain_labels = labels[:100]\ntest_features = features[100:]\ntest_labels = labels[100:]\n\n\n# In[5]:\n\n\n# Let's plot the first two features together with the label.\n# Remember, while we are plotting the testing feature distribution\n# here too, you might not be supposed to do so in real research,\n# because one should not peek into the testing data.\nlegend = ['rx', 'b+', 'go']\npyplot.title(\"Training data distribution, feature 0 and 1\")\nfor i in range(3):\n pyplot.plot(train_features[train_labels==i, 0], train_features[train_labels==i, 1], legend[i])\npyplot.figure()\npyplot.title(\"Testing data distribution, feature 0 and 1\")\nfor i in range(3):\n pyplot.plot(test_features[test_labels==i, 0], test_features[test_labels==i, 1], legend[i])\n\n\n# Now, as promised, let's put things into a Caffe2 DB. In this DB, what would happen is that we will use \"train_xxx\" as the key, and use a TensorProtos object to store two tensors for each data point: one as the feature and one as the label. We will use Caffe2's Python DB interface to do so.\n\n# In[6]:\n\n\n# First, let's see how one can construct a TensorProtos protocol buffer from numpy arrays.\nfeature_and_label = caffe2_pb2.TensorProtos()\nfeature_and_label.protos.extend([\n utils.NumpyArrayToCaffe2Tensor(features[0]),\n utils.NumpyArrayToCaffe2Tensor(labels[0])])\nprint('This is what the tensor proto looks like for a feature and its label:')\nprint(str(feature_and_label))\nprint('This is the compact string that gets written into the db:')\nprint(feature_and_label.SerializeToString())\n\n\n# In[7]:\n\n\n# Now, actually write the db.\n\ndef write_db(db_type, db_name, features, labels):\n db = core.C.create_db(db_type, db_name, core.C.Mode.write)\n transaction = db.new_transaction()\n for i in range(features.shape[0]):\n feature_and_label = caffe2_pb2.TensorProtos()\n feature_and_label.protos.extend([\n utils.NumpyArrayToCaffe2Tensor(features[i]),\n utils.NumpyArrayToCaffe2Tensor(labels[i])])\n transaction.put(\n 'train_%03d'.format(i),\n feature_and_label.SerializeToString())\n # Close the transaction, and then close the db.\n del transaction\n del db\n\nwrite_db(\"minidb\", \"iris_train.minidb\", train_features, train_labels)\nwrite_db(\"minidb\", \"iris_test.minidb\", test_features, test_labels)\n\n\n# Now, let's create a very simple network that only consists of one single TensorProtosDBInput operator, to showcase how we load data from the DB that we created. For training, you might want to do something more complex: creating a network, train it, get the model, and run the prediction service. 
To this end you can look at the MNIST tutorial for details.\n\n# In[8]:\n\n\nnet_proto = core.Net(\"example_reader\")\ndbreader = net_proto.CreateDB([], \"dbreader\", db=\"iris_train.minidb\", db_type=\"minidb\")\nnet_proto.TensorProtosDBInput([dbreader], [\"X\", \"Y\"], batch_size=16)\n\nprint(\"The net looks like this:\")\nprint(str(net_proto.Proto()))\n\n\n# In[9]:\n\n\nworkspace.CreateNet(net_proto)\n\n\n# In[10]:\n\n\n# Let's run it to get batches of features.\nworkspace.RunNet(net_proto.Proto().name)\nprint(\"The first batch of feature is:\")\nprint(workspace.FetchBlob(\"X\"))\nprint(\"The first batch of label is:\")\nprint(workspace.FetchBlob(\"Y\"))\n\n# Let's run again.\nworkspace.RunNet(net_proto.Proto().name)\nprint(\"The second batch of feature is:\")\nprint(workspace.FetchBlob(\"X\"))\nprint(\"The second batch of label is:\")\nprint(workspace.FetchBlob(\"Y\"))\n\n\n"
]
| [
[
"matplotlib.pyplot.plot",
"numpy.random.permutation",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure"
]
]
|
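One detail worth flagging in the `write_db` helper above: `'train_%03d'.format(i)` mixes `%`-style and `str.format` placeholders, so `.format(i)` leaves the string unchanged and every record is written under the literal key `train_%03d`. A small, framework-free sketch of the key formatting that was presumably intended:

```python
# The original expression returns the same key for every i:
assert 'train_%03d'.format(0) == 'train_%03d'

# Either %-interpolation or a {:03d} placeholder yields distinct, zero-padded keys:
keys_percent = ['train_%03d' % i for i in range(3)]
keys_format = ['train_{:03d}'.format(i) for i in range(3)]
print(keys_percent)  # ['train_000', 'train_001', 'train_002']
print(keys_format)   # ['train_000', 'train_001', 'train_002']
```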
MichalKononenko/T1MachineLearning | [
"5eb91aa465825b067d3e5a7c69115fc9b7545e76"
]
| [
"distributions/distributions.py"
]
| [
"\"\"\"\nContains blueprints for distributions that inherit from the abstract\ndistribution\n\"\"\"\nfrom abstract_distribution import AbstractDistribution\nfrom scipy import stats\nimport numpy as np\nimport abc\n__author__ = 'Michal Kononenko'\n\n\nclass NormalDistribution(AbstractDistribution):\n\n def __init__(self, mean=0, standard_deviation=1):\n self._mean = mean\n self._standard_deviation = standard_deviation\n self.distribution = stats.norm(loc=mean, scale=standard_deviation)\n\n def sample(self, number_of_samples=1):\n return self.distribution.rvs(number_of_samples)\n\n @property\n def mean(self):\n return self._mean\n\n\nclass UniformDistribution(AbstractDistribution):\n\n def __init__(self, lower_bound=0, upper_bound=1):\n loc = lower_bound\n scale = upper_bound - loc\n self.distribution = stats.uniform(loc=loc, scale=scale)\n\n def sample(self, number_of_samples=1):\n return self.distribution.rvs(number_of_samples)\n\n @property\n def mean(self):\n return self.distribution.mean()\n\n\nclass AbstractDiscreteDistribution(AbstractDistribution):\n\n @abc.abstractproperty\n def independent_variable(self):\n raise NotImplementedError\n\n @abc.abstractproperty\n def dependent_variable(self):\n raise NotImplementedError\n\n @property\n def mean(self):\n return np.mean(\n self.independent_variable * self.dependent_variable\n )\n"
]
| [
[
"scipy.stats.uniform",
"scipy.stats.norm",
"numpy.mean"
]
]
|
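A note on `AbstractDiscreteDistribution.mean` above: the expectation of a discrete distribution is conventionally sum_i x_i * p_i, whereas `np.mean(x * p)` divides that sum by the number of support points. A short sketch (with made-up support and probabilities) showing the factor-of-`len(x)` difference:

```python
# Hypothetical 3-point discrete distribution to contrast the two reductions.
import numpy as np

x = np.array([0.0, 1.0, 2.0])   # support (the "independent_variable")
p = np.array([0.2, 0.5, 0.3])   # probabilities (the "dependent_variable")

print(np.sum(x * p))    # 1.1   -- the usual expected value
print(np.mean(x * p))   # ~0.3667, i.e. the sum above divided by len(x) = 3
```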
aditi184/ML-Assignment2 | [
"44a8472f6ab4d1aaedd06c8aaaaf3b2f1c1ca569"
]
| [
"All_Notebooks_scripts/VGGNet.py"
]
| [
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nclass QDataset(Dataset):\n def __init__(self, x, y):\n # self.x=np.load(type+\"_data.npy\", allow_pickle=True)\n # self.y=np.load(type+'_label.npy',allow_pickle=True)\n self.x = (torch.from_numpy(x)/255).float()\n self.y = torch.from_numpy(y).float()\n\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n def __len__(self):\n return self.x.shape[0]\n\n\nclass VGGNet(nn.Module):\n def __init__(self):\n super(VGGNet, self).__init__()\n self.cnet=nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True), # section 3.3\n nn.MaxPool2d(kernel_size=2, stride=2), # (b x 96 x 27 x 27)\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True), # section 3.3\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True), # section 3.3\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512), # (b x 64 x 15 x 15)\n nn.ReLU(inplace=True),# section 3.3\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.nn=nn.Sequential(\n nn.Linear(2048,4096),\n nn.BatchNorm1d(4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.75),\n nn.Linear(4096,4096),\n nn.BatchNorm1d(4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.75),\n nn.Linear(4096,200),\n )\n\n def forward(self, x):\n x=self.cnet(x)\n x=torch.flatten(x,1)\n x=self.nn(x)\n return x\n\n\ndef train(log_interval, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), 
target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = nn.CrossEntropyLoss()\n target=torch.tensor(target.clone().detach(),dtype=torch.long, device=device)\n lo=loss(output, target)\n lo.backward()\n optimizer.step()\n '''if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), lo.item()))'''\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n target=torch.tensor(target.clone().detach(),dtype=torch.long, device=device)\n loss = nn.CrossEntropyLoss()\n lo=loss(output, target)\n test_loss +=lo.item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n data, target = data.to(\"cpu\"), target.to(\"cpu\")\n\n test_loss /= len(test_loader.dataset)\n\n '''print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))'''\n\n return test_loss,correct / len(test_loader.dataset)\n\n\ndef main():\n batch_size=128\n test_batch_size=1000\n epochs=7\n lr=0.1\n gamma=0.987\n no_cuda=False\n seed=1\n log_interval=100\n save_model=False\n\n use_cuda = not no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n x1=np.load('/home/cse/btech/cs1180349/scratch/train_x.npy')\n y1=np.load('/home/cse/btech/cs1180349/scratch/train_y.npy')\n train_loader = torch.utils.data.DataLoader(QDataset(x1, y1), batch_size=batch_size, shuffle=True, **kwargs)\n x2=np.load('/home/cse/btech/cs1180349/scratch/test_x.npy')\n y2=np.load('/home/cse/btech/cs1180349/scratch/test_y.npy')\n test_loader = torch.utils.data.DataLoader(QDataset(x2, y2), batch_size=test_batch_size, shuffle=True, **kwargs)\n\n model = VGGNet().to(device)\n if use_cuda:\n model=torch.nn.DataParallel(model)\n torch.backends.cudnn.benchmark=True\n optimizer = optim.SGD(model.parameters(), lr=lr)\n l1=[0]*epochs\n l2=[0]*epochs\n l3=[0]*epochs\n l4=[0]*epochs\n scheduler = StepLR(optimizer, step_size=1, gamma=gamma)\n for epoch in range(1, epochs + 1):\n print(epoch)\n train(log_interval, model, device, train_loader, optimizer, epoch)\n l1[epoch-1],l2[epoch-1]=test(model, device, test_loader)\n l3[epoch-1],l4[epoch-1]=test(model, device, train_loader)\n scheduler.step()\n plot1 = plt.figure(1)\n plt.title(\"Train Accuracy vs epochs\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Train Accuracy\")\n plt.plot(l4)\n plt.savefig(\"/home/cse/btech/cs1180349/scratch/train_acc_VGG.png\")\n plot1 = plt.figure(2)\n plt.title(\"Train Loss vs epochs\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Train Loss\")\n plt.plot(l3)\n plt.savefig(\"/home/cse/btech/cs1180349/scratch/train_loss_VGG.png\")\n plot1 = plt.figure(3)\n plt.title(\"Test Accuracy vs epochs\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Test Accuracy\")\n plt.plot(l2)\n plt.savefig(\"/home/cse/btech/cs1180349/scratch/test_acc_VGG.png\")\n plot1 = plt.figure(4)\n plt.title(\"Test Loss vs epochs\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Test Loss\")\n plt.plot(l1)\n plt.savefig(\"/home/cse/btech/cs1180349/scratch/test_loss_VGG.png\")\n if 
save_model:\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n return model\n\nif __name__=='__main__':\n a=main()\n"
]
| [
[
"torch.nn.Linear",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.BatchNorm2d",
"numpy.load",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.nn.MaxPool2d",
"matplotlib.pyplot.savefig",
"torch.manual_seed",
"torch.device",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"matplotlib.pyplot.plot",
"torch.from_numpy",
"torch.nn.BatchNorm1d",
"matplotlib.pyplot.ylabel",
"torch.flatten"
]
]
|
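The first fully connected layer above is `nn.Linear(2048, 4096)`, which pins down the expected input resolution: five `MaxPool2d(kernel_size=2, stride=2)` stages shrink each spatial dimension by a factor of 32, so 2048 flattened features (512 channels x 2 x 2) correspond to 64x64 inputs. A quick arithmetic check (the 64x64 resolution is an inference, not stated in the script):

```python
# Back-of-the-envelope check of the flattened size fed to nn.Linear(2048, 4096).
input_side = 64                     # assumed input resolution (inferred, not given)
channels_out = 512                  # channels after the last conv block
n_pools = 5                         # MaxPool2d(2, 2) stages in self.cnet
side_after_pools = input_side // (2 ** n_pools)
flattened = channels_out * side_after_pools ** 2
print(flattened)                    # 2048 -> consistent with the first Linear layer
```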
Pandinosaurus/cupy | [
"c98064928c8242d0c6a07e2c714e6c811f684a4e"
]
| [
"tests/cupyx_tests/scipy_tests/ndimage_tests/test_measurements.py"
]
| [
"import warnings\n\nimport numpy\nimport pytest\n\nimport cupy\nfrom cupy.cuda import runtime\nfrom cupy import testing\nfrom cupy import _util\nfrom cupy._core import _accelerator\nimport cupyx.scipy.ndimage # NOQA\n\ntry:\n import scipy.ndimage # NOQA\nexcept ImportError:\n pass\n\n\ndef _generate_binary_structure(rank, connectivity):\n if connectivity < 1:\n connectivity = 1\n if rank < 1:\n return numpy.array(True, dtype=bool)\n output = numpy.fabs(numpy.indices([3] * rank) - 1)\n output = numpy.add.reduce(output, 0)\n return output <= connectivity\n\n\[email protected](*testing.product({\n 'ndim': [1, 2, 3, 4],\n 'size': [50, 100],\n 'density': [0.2, 0.3, 0.4],\n 'connectivity': [None, 2, 3],\n 'x_dtype': [bool, numpy.int8, numpy.int32, numpy.int64,\n numpy.float32, numpy.float64],\n 'output': [None, numpy.int32, numpy.int64],\n 'o_type': [None, 'ndarray']\n}))\[email protected]\[email protected]_requires('scipy')\nclass TestLabel:\n\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_label(self, xp, scp):\n size = int(pow(self.size, 1 / self.ndim))\n x_shape = range(size, size + self.ndim)\n x = xp.zeros(x_shape, dtype=self.x_dtype)\n x[testing.shaped_random(x_shape, xp) < self.density] = 1\n if self.connectivity is None:\n structure = None\n else:\n structure = _generate_binary_structure(self.ndim,\n self.connectivity)\n if self.o_type == 'ndarray' and self.output is not None:\n output = xp.empty(x_shape, dtype=self.output)\n num_features = scp.ndimage.label(x, structure=structure,\n output=output)\n return output\n labels, num_features = scp.ndimage.label(x, structure=structure,\n output=self.output)\n return labels\n\n\[email protected]\[email protected]_requires('scipy')\nclass TestLabelSpecialCases:\n\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_label_empty(self, xp, scp):\n x = xp.empty(0)\n labels, num_features = scp.ndimage.label(x)\n return labels\n\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_label_0d_zero(self, xp, scp):\n x = xp.zeros([])\n labels, num_features = scp.ndimage.label(x)\n return labels\n\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_label_0d_one(self, xp, scp):\n x = xp.ones([])\n labels, num_features = scp.ndimage.label(x)\n return labels\n\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_label_swirl(self, xp, scp):\n x = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n x = xp.array(x)\n labels, num_features = scp.ndimage.label(x)\n return labels\n\n\[email protected]\[email protected](*testing.product({\n 'op': ['sum', 'mean', 'variance', 'standard_deviation', 'center_of_mass'],\n}))\[email protected]_requires('scipy')\nclass TestStats:\n\n def _make_image(self, shape, xp, dtype):\n if dtype == xp.bool_:\n return testing.shaped_random(shape, xp, dtype=xp.bool_)\n else:\n return testing.shaped_arange(shape, xp, dtype=dtype)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_single_dim(self, xp, scp, dtype):\n image = self._make_image((100,), xp, dtype)\n labels = testing.shaped_random((100,), xp, 
dtype=xp.int32, scale=4)\n index = xp.array([1, 2, 3])\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, index)\n if self.op == 'center_of_mass':\n assert isinstance(result, list)\n # assert isinstance(result[0], tuple)\n assert len(result[0]) == image.ndim\n result = xp.asarray(result)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_multi_dim(self, xp, scp, dtype):\n image = self._make_image((8, 8, 8), xp, dtype)\n labels = testing.shaped_random((8, 8, 8), xp, dtype=xp.int32, scale=4)\n index = xp.array([1, 2, 3])\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, index)\n if self.op == 'center_of_mass':\n assert isinstance(result, list)\n # assert isinstance(result[0], tuple)\n assert len(result[0]) == image.ndim\n result = xp.asarray(result)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_broadcast_labels(self, xp, scp, dtype):\n # 1d label will be broadcast to 2d\n image = self._make_image((16, 6), xp, dtype)\n labels = xp.asarray([1, 0, 2, 2, 2, 0], dtype=xp.int32)\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_broadcast_labels2(self, xp, scp, dtype):\n # 1d label will be broadcast to 2d\n image = self._make_image((16, 6), xp, dtype)\n labels = xp.asarray([1, 0, 2, 2, 2, 0], dtype=xp.int32)\n index = 2\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, index)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_zero_dim(self, xp, scp, dtype):\n image = self._make_image((), xp, dtype)\n labels = testing.shaped_random((), xp, dtype=xp.int32, scale=4)\n index = xp.array([1, 2, 3])\n op = getattr(scp.ndimage, self.op)\n if self.op == 'center_of_mass':\n # SciPy doesn't handle 0-dimensional array input for center_of_mass\n with pytest.raises(IndexError):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n op(image, labels, index)\n return xp.array([])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, index)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_only_input(self, xp, scp, dtype):\n image = self._make_image((100,), xp, dtype)\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image)\n return result\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_no_index(self, xp, scp, dtype):\n image = self._make_image((100,), xp, dtype)\n labels = testing.shaped_random((100,), xp, dtype=xp.int32, scale=4)\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels)\n return result\n\n 
@testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_scalar_index(self, xp, scp, dtype):\n image = self._make_image((100,), xp, dtype)\n labels = testing.shaped_random((100,), xp, dtype=xp.int32, scale=4)\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, 1)\n return result\n\n @testing.for_complex_dtypes()\n def test_invalid_image_dtype(self, dtype):\n image = self._make_image((100,), cupy, dtype)\n labels = testing.shaped_random((100,), cupy, dtype=cupy.int32, scale=4)\n index = cupy.array([1, 2, 3])\n op = getattr(cupyx.scipy.ndimage, self.op)\n with pytest.raises(TypeError):\n op(image, labels, index)\n\n def test_invalid_image_type(self):\n image = list(range(100))\n labels = testing.shaped_random((100,), cupy, dtype=cupy.int32, scale=4)\n index = cupy.array([1, 2, 3])\n op = getattr(cupyx.scipy.ndimage, self.op)\n with pytest.raises(TypeError):\n op(image, labels, index)\n\n def test_invalid_labels_shape(self):\n image = self._make_image((100,), cupy, cupy.int32)\n labels = testing.shaped_random((50,), cupy, dtype=cupy.int32, scale=4)\n index = cupy.array([1, 2, 3])\n op = getattr(cupyx.scipy.ndimage, self.op)\n with pytest.raises(ValueError):\n op(image, labels, index)\n\n def test_invalid_labels_type(self):\n image = self._make_image((100,), cupy, cupy.int32)\n labels = numpy.random.randint(0, 4, dtype=numpy.int32, size=100)\n index = cupy.array([1, 2, 3])\n op = getattr(cupyx.scipy.ndimage, self.op)\n with pytest.raises(TypeError):\n op(image, labels, index)\n\n def test_invalid_index_type(self):\n image = self._make_image((100,), cupy, cupy.int32)\n labels = testing.shaped_random((100,), cupy, dtype=cupy.int32, scale=4)\n index = [1, 2, 3]\n op = getattr(cupyx.scipy.ndimage, self.op)\n with pytest.raises(TypeError):\n op(image, labels, index)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_array_equal(scipy_name='scp')\n def test_no_values(self, xp, scp, dtype):\n image = xp.array([], dtype=dtype)\n labels = xp.array([])\n index = xp.array([])\n op = getattr(scp.ndimage, self.op)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', _util.PerformanceWarning)\n result = op(image, labels, index)\n return result\n\n\[email protected]\[email protected](*testing.product({\n 'op': ['maximum', 'median', 'minimum', 'maximum_position',\n 'minimum_position', 'extrema'],\n 'labels': [None, 5, 50],\n 'index': [None, 1, 'all', 'subset'],\n 'shape': [(512,), (32, 64)],\n 'enable_cub': [True, False],\n}))\[email protected]_requires('scipy')\nclass TestMeasurementsSelect:\n\n @pytest.fixture(autouse=True)\n def with_accelerators(self):\n old_accelerators = _accelerator.get_routine_accelerators()\n if self.enable_cub:\n _accelerator.set_routine_accelerators(['cub'])\n else:\n _accelerator.set_routine_accelerators([])\n yield\n _accelerator.set_routine_accelerators(old_accelerators)\n\n def _hip_skip_invalid_condition(self):\n if (runtime.is_hip\n and self.op == 'extrema'\n and (self.index is None\n or (self.index == 1 and self.labels in [None, 5])\n or (self.index in ['all', 'subset']\n and self.labels is None))):\n pytest.xfail('ROCm/HIP may have a bug')\n\n # no_bool=True due to https://github.com/scipy/scipy/issues/12836\n @testing.for_all_dtypes(no_complex=True, no_bool=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_measurements_select(self, xp, scp, dtype):\n 
self._hip_skip_invalid_condition()\n\n shape = self.shape\n rstate = numpy.random.RandomState(0)\n # scale must be small enough to avoid potential integer overflow due to\n # https://github.com/scipy/scipy/issues/12836\n x = testing.shaped_random(shape, xp=xp, dtype=dtype, scale=32)\n non_unique = xp.unique(x).size < x.size\n\n if (self.op in ['minimum_position', 'maximum_position'] and\n non_unique and self.index is not None):\n # skip cases with non-unique min or max position\n return xp.array([])\n\n if self.labels is None:\n labels = self.labels\n else:\n labels = rstate.choice(self.labels, x.size).reshape(shape) + 1\n labels = xp.asarray(labels)\n if self.index is None or isinstance(self.index, int):\n index = self.index\n elif self.index == 'all':\n if self.labels is not None:\n index = xp.arange(1, self.labels + 1, dtype=cupy.intp)\n else:\n index = None\n elif self.index == 'subset':\n if self.labels is not None:\n index = xp.arange(1, self.labels + 1, dtype=cupy.intp)[1::2]\n else:\n index = None\n func = getattr(scp.ndimage, self.op)\n result = func(x, labels, index)\n if self.op == 'extrema':\n if non_unique and self.index is not None:\n # omit comparison of minimum_position, maximum_position\n result = [xp.asarray(r) for r in result[:2]]\n else:\n result = [xp.asarray(r) for r in result]\n else:\n if isinstance(result, list):\n # convert list of coordinate tuples to an array for comparison\n result = xp.asarray(result)\n return result\n\n\[email protected]\[email protected](*testing.product({\n 'labels': [None, 4, 6],\n 'index': [None, [0, 2], [3, 1, 0], [1]],\n 'shape': [(200,), (16, 20)],\n}))\[email protected]_requires('scipy')\nclass TestHistogram():\n\n def _make_image(self, shape, xp, dtype, scale):\n return testing.shaped_random(shape, xp, dtype=dtype, scale=scale)\n\n @testing.for_all_dtypes(no_bool=True, no_complex=True)\n @testing.numpy_cupy_allclose(scipy_name='scp')\n def test_histogram(self, xp, scp, dtype):\n nbins = 5\n minval = 0\n maxval = 10\n image = self._make_image(self.shape, xp, dtype, scale=maxval)\n labels = self.labels\n index = self.index\n if labels is not None:\n labels = testing.shaped_random(self.shape, xp, dtype=xp.int32,\n scale=self.labels)\n if index is not None:\n index = xp.array(index)\n op = getattr(scp.ndimage, 'histogram')\n if index is not None and labels is None:\n # cannot give an index array without labels\n with pytest.raises(ValueError):\n op(image, minval, maxval, nbins, labels, index)\n return xp.asarray([])\n result = op(image, minval, maxval, nbins, labels, index)\n if index is None:\n return result\n # stack 1d arrays into a single array for comparison\n return xp.stack(result)\n\n\[email protected]\[email protected](*testing.product({\n 'labels': [None, 4],\n 'index': [None, [0, 2], [3, 1, 0], [1]],\n 'shape': [(200,), (16, 20)],\n 'dtype': [numpy.float64, 'same'],\n 'default': [0, 3],\n 'pass_positions': [True, False],\n}))\[email protected]_requires('scipy')\nclass TestLabeledComprehension():\n\n def _make_image(self, shape, xp, dtype, scale):\n if dtype == xp.bool_:\n return testing.shaped_random(shape, xp, dtype=xp.bool_)\n else:\n return testing.shaped_random(shape, xp, dtype=dtype, scale=scale)\n\n @testing.for_all_dtypes(no_bool=True, no_complex=True, no_float16=True)\n @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-4, atol=1e-4)\n def test_labeled_comprehension(self, xp, scp, dtype):\n image = self._make_image(self.shape, xp, dtype, scale=101)\n labels = self.labels\n index = self.index\n if labels is not 
None:\n labels = testing.shaped_random(self.shape, xp, dtype=xp.int32,\n scale=4)\n if index is not None:\n index = xp.array(index)\n\n if self.pass_positions:\n # simple function that takes a positions argument\n def func(x, pos):\n return xp.sum(x + pos > 50)\n else:\n # simple function to apply to each label\n func = xp.sum\n\n op = getattr(scp.ndimage, 'labeled_comprehension')\n dtype = image.dtype if self.dtype == 'same' else self.dtype\n if index is not None and labels is None:\n # cannot give an index array without labels\n with pytest.raises(ValueError):\n op(image, labels, index, func, dtype, self.default,\n self.pass_positions)\n return xp.asarray([])\n return op(image, labels, index, func, dtype, self.default,\n self.pass_positions)\n"
]
| [
[
"numpy.array",
"numpy.add.reduce",
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.indices"
]
]
|
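The `_generate_binary_structure` helper above builds the connectivity structuring element from the city-block distance to the centre of a 3^rank cube. A tiny standalone sketch for rank 2, connectivity 1 (the familiar 4-connected cross):

```python
# What the helper computes for rank=2, connectivity=1: L1 distance <= 1 from the centre.
import numpy as np

dist = np.add.reduce(np.abs(np.indices([3, 3]) - 1), 0)  # city-block distance
print(dist <= 1)
# [[False  True False]
#  [ True  True  True]
#  [False  True False]]
```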
shancarter/umap | [
"dbe311eb024f7cb8e0182edb121771cc5d1e6d8f"
]
| [
"umap/tests/test_umap.py"
]
| [
"\"\"\"\nTests for UMAP to ensure things are working as expected.\n\"\"\"\nfrom nose.tools import assert_less\nfrom nose.tools import assert_greater_equal\nimport os.path\nimport numpy as np\nfrom scipy.spatial import distance\nfrom scipy import sparse\nfrom scipy import stats\nfrom sklearn.utils.estimator_checks import check_estimator\nfrom sklearn.utils.testing import (assert_equal,\n assert_array_equal,\n assert_array_almost_equal,\n assert_raises,\n assert_in,\n assert_not_in,\n assert_no_warnings,\n if_matplotlib)\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.neighbors import KDTree\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.stats import mode\n\nfrom tempfile import mkdtemp\nfrom functools import wraps\nfrom nose import SkipTest\n\nfrom sklearn import datasets\n\nimport umap.distances as dist\nimport umap.sparse as spdist\nfrom umap.umap_ import (\n INT32_MAX,\n INT32_MIN,\n rptree_leaf_array,\n make_nn_descent,\n UMAP)\n\nnp.random.seed(42)\nspatial_data = np.random.randn(10, 20)\nbinary_data = np.random.choice(a=[False, True],\n size=(10, 20),\n p=[0.66, 1 - 0.66])\nsparse_spatial_data = sparse.csr_matrix(spatial_data * binary_data)\nsparse_binary_data = sparse.csr_matrix(binary_data)\n\nnn_data = np.random.uniform(0, 1, size=(1000, 5))\nbinary_nn_data = np.random.choice(a=[False, True],\n size=(1000, 5),\n p=[0.66, 1 - 0.66])\nsparse_nn_data = sparse.csr_matrix(nn_data * binary_nn_data)\n\nspatial_distances = (\n 'euclidean',\n 'manhattan',\n 'chebyshev',\n 'minkowski',\n 'hamming',\n 'canberra',\n 'braycurtis',\n 'cosine',\n 'correlation'\n)\n\nbinary_distances = (\n 'jaccard',\n 'matching',\n 'dice',\n 'kulsinski',\n 'rogerstanimoto',\n 'russellrao',\n 'sokalmichener',\n 'sokalsneath',\n 'yule'\n)\n\n\ndef test_nn_descent_neighbor_accuracy():\n rng_state = np.random.randint(INT32_MIN, INT32_MAX, size=3)\n nn_descent = make_nn_descent(dist.euclidean, ())\n leaf_array = rptree_leaf_array(nn_data, 10, rng_state)\n tmp_indices, knn_dists = nn_descent(nn_data,\n 10,\n rng_state,\n leaf_array=leaf_array)\n knn_indices = tmp_indices.astype(np.int64)\n for i in range(knn_indices.shape[0]):\n order = np.argsort(knn_dists[i])\n knn_dists[i] = knn_dists[i][order]\n knn_indices[i] = knn_indices[i][order]\n\n tree = KDTree(nn_data)\n true_indices = tree.query(nn_data, 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (spatial_data.shape[0] * 10)\n assert_greater_equal(percent_correct, 0.99, 'NN-descent did not get 99% '\n 'accuracy on nearest neighbors')\n\n\ndef test_sparse_nn_descent_neighbor_accuracy():\n rng_state = np.random.randint(INT32_MIN, INT32_MAX, size=3)\n nn_descent = spdist.make_sparse_nn_descent(spdist.sparse_euclidean, ())\n leaf_array = rptree_leaf_array(sparse_nn_data, 10, rng_state)\n tmp_indices, knn_dists = nn_descent(sparse_nn_data.indices,\n sparse_nn_data.indptr,\n sparse_nn_data.data,\n sparse_nn_data.shape[0],\n 10,\n rng_state,\n leaf_array=leaf_array)\n knn_indices = tmp_indices.astype(np.int64)\n for i in range(knn_indices.shape[0]):\n order = np.argsort(knn_dists[i])\n knn_dists[i] = knn_dists[i][order]\n knn_indices[i] = knn_indices[i][order]\n\n tree = KDTree(sparse_nn_data.todense())\n true_indices = tree.query(sparse_nn_data.todense(),\n 10, return_distance=False)\n\n print(sparse_nn_data.shape)\n\n num_correct = 0.0\n for i in range(nn_data.shape[0]):\n 
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (spatial_data.shape[0] * 10)\n assert_greater_equal(percent_correct, 0.99, 'Sparse NN-descent did not get '\n '99% accuracy on nearest '\n 'neighbors')\n\n\ndef test_trustworthiness():\n pass\n\n\ndef test_metrics():\n for metric in spatial_distances:\n dist_matrix = pairwise_distances(spatial_data, metric=metric)\n dist_function = dist.named_distances[metric]\n test_matrix = np.array([[dist_function(spatial_data[i], spatial_data[j])\n for j in range(spatial_data.shape[0])]\n for i in range(spatial_data.shape[0])])\n assert_array_almost_equal(test_matrix, dist_matrix,\n err_msg=\"Distances don't match \"\n \"for metric {}\".format(metric))\n\n for metric in binary_distances:\n dist_matrix = pairwise_distances(binary_data, metric=metric)\n dist_function = dist.named_distances[metric]\n test_matrix = np.array([[dist_function(binary_data[i], binary_data[j])\n for j in range(binary_data.shape[0])]\n for i in range(binary_data.shape[0])])\n assert_array_almost_equal(test_matrix, dist_matrix,\n err_msg=\"Distances don't match \"\n \"for metric {}\".format(metric))\n\n\ndef test_sparse_metrics():\n for metric in spatial_distances:\n # Sparse correlation has precision errors right now, leave out ...\n if metric in spdist.sparse_named_distances and metric is not \\\n 'correlation':\n dist_matrix = pairwise_distances(sparse_spatial_data.todense(),\n metric=metric)\n dist_function = spdist.sparse_named_distances[metric]\n if metric in spdist.sparse_need_n_features:\n test_matrix = np.array(\n [[dist_function(sparse_spatial_data[i].indices,\n sparse_spatial_data[i].data,\n sparse_spatial_data[j].indices,\n sparse_spatial_data[j].data,\n sparse_spatial_data.shape[1])\n for j in range(sparse_spatial_data.shape[0])]\n for i in range(sparse_spatial_data.shape[0])])\n else:\n test_matrix = np.array(\n [[dist_function(sparse_spatial_data[i].indices,\n sparse_spatial_data[i].data,\n sparse_spatial_data[j].indices,\n sparse_spatial_data[j].data)\n for j in range(sparse_spatial_data.shape[0])]\n for i in range(sparse_spatial_data.shape[0])])\n\n assert_array_almost_equal(test_matrix, dist_matrix,\n err_msg=\"Distances don't match \"\n \"for metric {}\".format(metric))\n\n\ndef test_sparse_fit():\n pass\n\n\n@SkipTest\ndef test_sklearn_digits():\n digits = datasets.load_digits()\n data = digits.data\n embedding = UMAP(n_neighbors=5, min_dist=0.01,\n random_state=42).fit_transform(data)\n #np.save('digits_embedding_42.npy', embedding)\n to_match = np.load(os.path.join(os.path.dirname(__file__),\n 'digits_embedding_42.npy'))\n assert_array_almost_equal(embedding, to_match, err_msg='Digits embedding '\n 'is not consistent '\n 'with previous runs')\n"
]
| [
[
"numpy.random.choice",
"sklearn.datasets.load_digits",
"numpy.random.seed",
"numpy.random.randn",
"sklearn.utils.testing.assert_array_almost_equal",
"sklearn.metrics.pairwise_distances",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.argsort",
"sklearn.neighbors.KDTree",
"numpy.in1d",
"scipy.sparse.csr_matrix"
]
]
|
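The accuracy tests above score NN-descent by counting, per point, how many of its reported neighbours appear among the true k nearest neighbours from a `KDTree`; `np.in1d` does the membership test. A toy, self-contained version of that per-point recall count (made-up indices):

```python
# Toy recall count in the style of test_nn_descent_neighbor_accuracy.
import numpy as np

true_indices = np.array([3, 7, 1, 9])   # hypothetical true 4-NN of one point
knn_indices = np.array([3, 1, 9, 5])    # hypothetical approximate 4-NN

num_correct = np.sum(np.in1d(true_indices, knn_indices))
print(num_correct / len(true_indices))  # 0.75 -> 3 of the 4 true neighbours recovered
```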
wellsguo/DocHub | [
"ff5db051e55e049e4150a553870d52c6c251a76b"
]
| [
"Dev/mongodb/createGovtWorkers.py"
]
| [
"import pymongo\nimport pprint\nimport numpy as np\nimport datetime\n\nfrom pymongo import MongoClient\n\nnp.random.seed(2) # set seed so everybody running it gets the same data\n\nclient = MongoClient() # connects on default host\n# client = MongoClient('localhost',27017)) # explicit connect command\n\ndb = client.db_people \n\n\n# remove entire collection, i.e. all docs in peopleDB.thePeople \ndb.thePeople.remove()\n\n# create UNIQUE INDEX\n# db.thePeople.create_index( [('pid', pymongo.ASCENDING)], unique=True )\n\n# the collection we will create\npeeps = db.thePeople \n\n\nstates = [\"AL\",\"AK\",\"AZ\",\"AZ\",\"AR\",\"CA\",\"CO\",\"CT\",\"DE\",\"FL\",\"GA\", \"HI\",\"ID\",\"IL\",\"IN\",\"IA\",\"KS\",\"KY\",\"LA\",\"ME\",\"MD\", \"MA\",\"MI\",\"MN\",\"MS\",\"MO\",\"MT\",\"NE\",\"NV\",\"NH\",\"NJ\", \"NM\",\"NY\",\"NC\",\"ND\",\"OH\",\"OK\",\"OR\",\"PA\",\"RI\",\"SC\", \"SD\",\"TN\",\"TX\",\"UT\",\"VT\",\"VA\",\"WA\",\"WV\",\"WI\",\"WY\"]\n\nfNames = [\"Bob\",\"Mary\",\"Isabella\",\"Santiago\",\"Valentina\",\"Daniella\",\"Alejandro\",\"Diego\",\"Victoria\",\"Sofia\",\"John\",\"Paul\",\"Peter\",\"Joseph\",\"Vicky\",\"David\",\"Jeffrey\",\"William\",\"Jennifer\",\"Linda\",\"Sarah\",\"Ashley\",\"Michelle\",\"Amy\",\"Julie\",\"Julia\",\"Hannah\",\"Jayden\",\"Noah\",\"Demarco\",\"Madison\",\"Ava\",\"Kayla\",\"Jayla\",\"Priya\",\"Tanya\",\"Neha\",\"Rahul\",\"Raj\",\"Amit\",\"Mohammed\",\"Mohammad\",\"Vivek\",\"Fatimah\",\"Hasan\"]\n\nmNames = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n\nlNames = [\"Garcia\",\"Martinez\",\"Gonzalez\",\"Lopez\",\"Torres\",\"Ramirez\",\"Hernandez\",\"Baker\",\"Jackson\",\"Brown\",\"Smith\",\"Jones\",\"Miller\",\"White\",\"Johnson\",\"Wilson\",\"Williams\",\"Anderson\",\"Das\",\"Mukherjee\",\"Simha\",\"Liu\",\"Li\",\"Zhao\",\"Zhang\",\"Wu\",\"Chen\",\"Chan\",\"Lee\",\"Wong\",\"Park\",\"Kim\",\"Ngyuen\",\"Le\",\"Tran\",\"Dang\",\"Sato\",\"Tanaka\",\"Takahashi\"]\n\ntimeStartInsert = datetime.datetime.now()\nnumDocs = 2000\nprint(\"\\nStart inserting \" + str(numDocs) + \" documents at: \" + str(timeStartInsert) )\nfor i in range(0,numDocs):\n\taPid = i\n\taFName = fNames[ np.random.randint(len(fNames)) ]\n\taMName = mNames[ np.random.randint(len(mNames)) ]\n\taLName = lNames[ np.random.randint(len(lNames)) ]\n\taName = aFName + \" \" + aMName + \" \" + aLName\n\tprint(aName)\n\taAge = np.random.randint(100) + 18\n\taWeight = np.random.randint(100) + 40 # in Kilos\n\taHeight = np.random.randint(150,200) # in centimeters\n\taBirth = 2019 - aAge\n\taSalary = np.random.randint(100000) + 30000 # lowests paid is 30K\n\taState = states[ np.random.randint( len(states) ) ]\n\taChildren = []\n\tif (aAge > 20):\n\t\taNumChildren = np.random.binomial(8,0.40) # 0..8 children, binomially distributed with probability p = 0.40\n\t\tfor j in range (0,aNumChildren):\n\t\t\taChildren.append( fNames[ np.random.randint(len(fNames)) ] + \" \" + mNames[ np.random.randint(len(mNames)) ] + \" \" + aLName)\n\telse:\n\t\taNumChildren = 0\n\tnewPerson = {\"pid\":aPid,\"firstName\":aFName, \"MI\":aMName, \"lastName\":aLName, \"state\":aState, \"age\":aAge,\"birth\":aBirth, \"salary\":aSalary, \"numChildren\":aNumChildren,\"children\":aChildren, \"weight\":aWeight, \"height\":aHeight}\n\tprint(newPerson)\n\tpeeps.insert_one(newPerson)\n\ntimeEndInsert = datetime.datetime.now()\ntimeElapsedInsert = timeEndInsert - timeStartInsert\ntimeStartQueries = 
datetime.datetime.now()\n\nprint(\"\\nNumber of docs in db.thePeople = \" + str(db.thePeople.count()))\n# print(\"\\nAt start, output from peeps.find():\")\n# for objs in peeps.find():\n# \tprint(objs)\n\nnumQueries = 4\nprint(\"\\nStart \" + str(numQueries) + \" random queries at: \")\nprint(datetime.datetime.now())\nfor i in range(1,numQueries):\n\trandPID = np.random.randint(numDocs)\n\tanObject = db.thePeople.find_one( {\"pid\":randPID} )\n\tprint(anObject)\n\ntimeEndQueries = datetime.datetime.now()\ntimeElapsedQueries = timeEndQueries - timeStartQueries\n\t\n'''\nprint(\"\\nFinished random queries at: \")\nprint(datetime.datetime.now())\n\n\nprint(\"\\nElapsed time for inserts = \" + str(timeElapsedInsert) ) ;\nprint(\"\\nElapsed time for queries = \" + str(timeElapsedQueries) ) ;\n\n'''\n"
]
| [
[
"numpy.random.seed",
"numpy.random.binomial",
"numpy.random.randint"
]
]
|
vanduc103/coral_examples | [
"a514d003a3948cb0888d2dabc0bdd93939f8ddd0"
]
| [
"opencv/csv_output/detect_2csv.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A demo that runs object detection on camera frames using OpenCV.\n\nTEST_DATA=../all_models\n\nRun face detection model:\npython3 detect.py \\\n --model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite\n\nRun coco model:\npython3 detect.py \\\n --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \\\n --labels ${TEST_DATA}/coco_labels.txt\n\n\"\"\"\nimport argparse\nimport collections\nimport common\nimport cv2\nimport numpy as np\nimport os\nimport csv\nimport glob\nimport time\nfrom PIL import Image\nimport re\nimport tflite_runtime.interpreter as tflite\n\nObject = collections.namedtuple('Object', ['id', 'score', 'bbox'])\n\ndef load_labels(path):\n p = re.compile(r'\\s*(\\d+)(.+)')\n with open(path, 'r', encoding='utf-8') as f:\n lines = (p.match(line).groups() for line in f.readlines())\n return {int(num): text.strip() for num, text in lines}\n\nclass BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])):\n \"\"\"Bounding box.\n Represents a rectangle which sides are either vertical or horizontal, parallel\n to the x or y axis.\n \"\"\"\n __slots__ = ()\n\ndef get_output(interpreter, score_threshold, top_k, class_list, image_scale=1.0):\n \"\"\"Returns list of detected objects.\"\"\"\n boxes = common.output_tensor(interpreter, 0)\n class_ids = common.output_tensor(interpreter, 1)\n scores = common.output_tensor(interpreter, 2)\n count = int(common.output_tensor(interpreter, 3))\n\n def make(i):\n ymin, xmin, ymax, xmax = boxes[i]\n return Object(\n id=int(class_ids[i]),\n score=scores[i],\n bbox=BBox(xmin=np.maximum(0.0, xmin),\n ymin=np.maximum(0.0, ymin),\n xmax=np.minimum(1.0, xmax),\n ymax=np.minimum(1.0, ymax)))\n\n return [make(i) for i in range(top_k) if scores[i] >= score_threshold and class_ids[i] in class_list]\n\ndef main():\n default_model_dir = '../all_models'\n default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'\n default_labels = 'coco_labels.txt'\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', help='.tflite model path',\n default=os.path.join(default_model_dir,default_model))\n parser.add_argument('--labels', help='label file path',\n default=os.path.join(default_model_dir, default_labels))\n parser.add_argument('--top_k', type=int, default=10,\n help='number of categories with highest score to display')\n parser.add_argument('--threshold', type=float, default=0.3,\n help='classifier score threshold')\n parser.add_argument('--class_ids', nargs='*', type=int, default=0,\n help='Array of class id')\n parser.add_argument('--input_files', default='/home/mendel/dataset/*.jpg',\n help='Input files')\n parser.add_argument('--csv_out', default='detect_output.csv',\n help='csv output file')\n args = parser.parse_args()\n if args.class_ids == 0:\n args.class_ids = [0]\n\n print('Loading {} with {} labels.'.format(args.model, args.labels))\n interpreter = common.make_interpreter(args.model)\n 
interpreter.allocate_tensors()\n labels = load_labels(args.labels)\n\n # csv writer\n f = open(args.csv_out, 'w')\n with f:\n fnames = ['timestamp', 'idx', 'label', 'width', 'height', 'xmin', 'ymin', 'xmax', 'ymax', 'score']\n writer = csv.DictWriter(f, fieldnames=fnames)\n writer.writeheader()\n\n # read frames\n inference_time = []\n for image_path in sorted(glob.glob(args.input_files)):\n image_name = os.path.splitext(os.path.basename(image_path))[0]\n #print(image_name)\n pil_im = Image.open(image_path)\n\n # inference\n start = time.time()\n common.set_input(interpreter, pil_im)\n interpreter.invoke()\n objs = get_output(interpreter, score_threshold=args.threshold, top_k=args.top_k, class_list=args.class_ids)\n inference_time.append(time.time() - start)\n\n # return results\n (width, height) = pil_im.size\n idx = -1\n for obj in objs:\n x0, y0, x1, y1 = list(obj.bbox)\n x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)\n score = obj.score\n label = labels.get(obj.id, obj.id)\n idx += 1\n writer.writerow({'timestamp' : image_name, 'idx': idx, 'label': label, 'width': width, 'height': height, 'xmin': x0, 'ymin': y0, 'xmax': x1, 'ymax': y1, 'score': score})\n \n print(\"Inference time : {:.3f} ms\".format(sum(inference_time)*1000/len(inference_time)))\n print(\"Frames per second : {:.2f} fps\".format(len(inference_time)/sum(inference_time)))\n \n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.minimum",
"numpy.maximum"
]
]
|
Omer-Sella/ldpc | [
"955c0bc32236e171365cbbb88f00574302771610"
]
| [
"wifiMatrices.py"
]
| [
"import numpy as np\nfrom scipy.linalg import circulant\nWIFI_LDPC_DATA_TYPE = np.int64\n\n\nWIFI_1944_81_5_6 = [[13, 48, 80, 66, 4, 74, 7, 30, 76, 52, 37, 60, None, 49, 73, 31, 74, 73, 23, None, 1, 0, None, None],\n[69, 63, 74, 56, 64, 77, 57, 65, 6, 16, 51, None, 64, None, 68, 9, 48, 62, 54, 27, None, 0, 0, None],\n[51, 15, 0, 80, 24, 25, 42, 54, 44, 71, 71, 9, 67, 35, None, 58, None, 29, None, 53, 0, None, 0, 0],\n[16, 29, 36, 41, 44, 56, 59, 37, 50, 24, None, 65, 4, 65, 52, None, 4, None, 73, 52, 1, None, None, 0]]\n\n\ndef getWifiParityMatrix(codewordSize = 1944, circulantSize = 81, rate = 5/6):\n \n \n if codewordSize == 1944:\n assert circulantSize == 81\n if rate == 5/6:\n for i in range (4):\n for j in range (24): # 1944 / 81 == 24\n \n newVector = np.zeros(circulantSize, dtype = WIFI_LDPC_DATA_TYPE)\n if WIFI_1944_81_5_6[i][j] != None:\n newVector[WIFI_1944_81_5_6[i][j]]= 1\n \n newCirculant = circulant(newVector).T\n if j != 0:\n newMatrixHstack = np.hstack((newMatrixHstack, newCirculant))\n else:\n newMatrixHstack = newCirculant\n if i != 0:\n newMatrix = np.vstack((newMatrix, newMatrixHstack))\n else:\n newMatrix = newMatrixHstack\n return newMatrix\n \n\n\n"
]
| [
[
"numpy.hstack",
"scipy.linalg.circulant",
"numpy.vstack",
"numpy.zeros"
]
]
|
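`getWifiParityMatrix` above assembles the parity-check matrix from 81x81 blocks, where each table entry k becomes the identity cyclically shifted by k (and `None` becomes an all-zero block). A toy sketch with circulant size 5 showing what a single block looks like:

```python
# One block of the structured parity-check matrix, at toy size 5 instead of 81.
import numpy as np
from scipy.linalg import circulant

k = 2                                # a shift index, as in the WIFI_1944_81_5_6 table
e = np.zeros(5, dtype=np.int64)
e[k] = 1
block = circulant(e).T               # identity with columns cyclically shifted by k
print(block)                         # row i has its single 1 in column (i + k) % 5
```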
peterdsharpe/FastFlow3D | [
"d02c8ff97fa84adcd9db988b09157695d9e2b318"
]
| [
"aerosandbox/tools/pretty_plots/plotting_with_uncertainty.py"
]
| [
"from aerosandbox.tools.pretty_plots.utilities.natural_univariate_spline import NaturalUnivariateSpline as Spline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef plot_with_bootstrapped_uncertainty(\n x: np.ndarray,\n y: np.ndarray,\n y_stdev: float,\n ci: float = 0.95,\n color=None,\n draw_line=True,\n draw_ci=True,\n draw_data=True,\n label_line=None,\n label_ci=None,\n label_data=None,\n n_bootstraps=2000,\n n_fit_points=300,\n spline_degree=3,\n):\n if not (ci > 0 and ci < 1):\n raise ValueError(\"Confidence interval `ci` should be in the range of (0, 1).\")\n\n ### Discard any NaN points\n isnan = np.logical_or(\n np.isnan(x),\n np.isnan(y),\n )\n x = x[~isnan]\n y = y[~isnan]\n\n ### Prepare for the bootstrap\n x_fit = np.linspace(x.min(), x.max(), n_fit_points)\n\n y_bootstrap_fits = np.empty((n_bootstraps, len(x_fit)))\n\n for i in tqdm(range(n_bootstraps), desc=\"Bootstrapping\", unit=\" samples\"):\n\n ### Obtain a bootstrap resample\n ### Here, instead of truly resampling, we just pick weights that effectively mimic a resample.\n ### A computationally-efficient way to pick weights is the following clever trick with uniform sampling:\n splits = np.random.rand(len(x) + 1) * len(x) # \"limit\" bootstrapping\n splits[0] = 0\n splits[-1] = len(x)\n\n weights = np.diff(np.sort(splits))\n\n y_bootstrap_fits[i, :] = Spline(\n x=x,\n y=y,\n w=weights,\n s=len(x) * y_stdev,\n k=spline_degree,\n ext='extrapolate'\n )(x_fit)\n\n ### Compute a confidence interval using equal-tails method\n y_median_and_ci = np.nanquantile(\n y_bootstrap_fits,\n q=[\n (1 - ci) / 2,\n 0.5,\n 1 - (1 - ci) / 2\n ],\n axis=0\n )\n\n if draw_line:\n line, = plt.plot(\n x_fit,\n y_median_and_ci[1, :],\n color=color,\n label=label_line\n )\n if color is None:\n color = line.get_color()\n\n if draw_ci:\n plt.fill_between(\n x_fit,\n y_median_and_ci[0, :],\n y_median_and_ci[2, :],\n color=color,\n label=label_ci,\n alpha=0.25,\n linewidth=0\n )\n if draw_data:\n line, = plt.plot(\n x,\n y,\n \".\",\n color=color,\n label=label_data,\n alpha=0.5\n )\n if color is None:\n color = line.get_color()\n return x_fit, y_bootstrap_fits\n\n\nif __name__ == '__main__':\n\n import matplotlib.pyplot as plt\n import aerosandbox.tools.pretty_plots as p\n\n np.random.seed(0)\n\n ### Generate data\n x = np.linspace(0, 10, 101)\n y_true = np.abs(x - 5) # np.sin(x)\n y_noisy = y_true + 0.1 * np.random.randn(len(x))\n\n ### Plot spline regression\n fig, ax = plt.subplots(dpi=300)\n x_fit, y_bootstrap_fits = plot_with_bootstrapped_uncertainty(\n x,\n y_noisy,\n y_stdev=0.1,\n label_line=\"Best Estimate\",\n label_data=\"Data\",\n label_ci=\"95% CI\",\n )\n ax.plot(x, y_true, \"k\", label=\"True Function\", alpha=0.2)\n\n p.show_plot(\n \"Spline Bootstrapping Test\",\n r\"$x$\",\n r\"$y$\",\n )\n"
]
| [
[
"numpy.isnan",
"numpy.nanquantile",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.fill_between",
"numpy.sort",
"numpy.abs",
"numpy.linspace"
]
]
|
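The bootstrap above avoids explicit resampling: it draws n + 1 uniform cut points on [0, n], pins the endpoints, sorts, and uses the consecutive gaps as per-sample weights for the spline fit. The gaps are nonnegative and sum exactly to n, so they behave like fractional resample counts. A minimal demonstration of that weight construction (toy n, and a different RNG API than the script uses):

```python
# Gap-based bootstrap weights, as in plot_with_bootstrapped_uncertainty (toy n = 8).
import numpy as np

rng = np.random.default_rng(0)
n = 8
splits = rng.random(n + 1) * n    # n + 1 cut points in [0, n)
splits[0] = 0.0
splits[-1] = float(n)

weights = np.diff(np.sort(splits))
print(weights)                    # 8 nonnegative weights
print(weights.sum())              # exactly 8.0: total "mass" matches the sample size
```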
coderrg/Self-Driving-RPi-Car | [
"41392da6356ff733f2c98feb4fe54941e7df7f30"
]
| [
"Base Algorithms/Algorithm1.py"
]
| [
"#Code written by Rhythm Garg\r\n\r\n'''\r\n###################################################################################\r\n'''\r\nimport numpy as np\r\nimport cv2\r\n\r\n'''\r\n###################################################################################\r\n'''\r\n\r\ndef makeYsGood(lines):\r\n '''make sure y1 is below (greater in index) y2\r\n takes in a list of lines, each line is [x1,y1,x2,y2]\r\n and retuens a list of lines such that y1 is greater \r\n in index than y2''' \r\n for line in lines:\r\n #if the value of y2 is greater than y1, then y2 is lower than y1 because of indexing\r\n if line[3] > line[1]:\r\n #switch the coordinates\r\n tempx = line[0]\r\n tempy = line[1]\r\n line[0] = line[2]\r\n line[1] = line[3]\r\n line[2] = tempx\r\n line[3] = tempy\r\n return lines\r\n\r\n\r\n'''\r\n###################################################################################\r\n'''\r\n\r\ndef averageLine(lines):\r\n '''\r\n Input is a list of lines. Each line is a 4 element list:\r\n [x1, y1, x2, y2]\r\n A list [x1, y1, x2, y2] representing the average line of \r\n the inputted lines is returned. If there are no lines, None\r\n is returned.\r\n '''\r\n numLines = len(lines)\r\n if numLines == 0:\r\n return None\r\n \r\n sumX1 = 0\r\n sumY1 = 0\r\n sumX2 = 0\r\n sumY2 = 0\r\n \r\n for line in lines:\r\n sumX1 = sumX1 + line[0]\r\n sumY1 = sumY1 + line[1]\r\n sumX2 = sumX2 + line[2]\r\n sumY2 = sumY2 + line[3]\r\n \r\n avgX1 = sumX1/numLines\r\n avgY1 = sumY1/numLines\r\n avgX2 = sumX2/numLines\r\n avgY2 = sumY2/numLines\r\n \r\n avgLine = [avgX1, avgY1, avgX2, avgY2]\r\n return avgLine\r\n\r\n'''\r\n###################################################################################\r\n'''\r\n\r\ndef algorithm1(myLines):\r\n '''\r\n Input is a list of lines. Each line is a 4 element list:\r\n [x1, y1, x2, y2]\r\n A list of two lines (each line is [x1, y1, x2, y2])\r\n representing the lines of the edges of the path is returned using algorithm 1. \r\n If there are not enough lines, two vertical lines are returned.\r\n '''\r\n \r\n if len(myLines) < 1:\r\n return [[160,360,160,460],[480,360,480,460]]\r\n \r\n if len(myLines) == 1:\r\n return [myLines[0], myLines[0]]\r\n \r\n lanes = [myLines[0], myLines[-1]]\r\n \r\n #make ys good\r\n lanes = makeYsGood(lanes)\r\n \r\n #sort by x1\r\n lanes.sort(key=lambda x: x[0])\r\n\r\n return lanes\r\n\r\n'''\r\n###################################################################################\r\n'''\r\n\r\ndef algo1GetSlopes(image):\r\n '''\r\n Input is a 480 rows x 640 columns RGB image of the road.\r\n A list of two slopes (both are floats) representing the slopes\r\n of the lines of the edges of the path is returned. 
\r\n '''\r\n blur = cv2.GaussianBlur(image,(13,13), 0)\r\n #cv2.imwrite(\"2GaussianBlur.jpg\", blur)\r\n \r\n edges = cv2.Canny(blur,100,300)\r\n #cv2.imwrite(\"3CannyTransform.jpg\", edges)\r\n \r\n lines = cv2.HoughLinesP(edges, 1, np.pi/180, 40, np.array([]), minLineLength=10, maxLineGap=50)\r\n \r\n #input for algorithms\r\n myLines = []\r\n for line in lines:\r\n nextLine = []\r\n lineTemp = line[0]\r\n nextLine.append(lineTemp[0])\r\n nextLine.append(lineTemp[1])\r\n nextLine.append(lineTemp[2])\r\n nextLine.append(lineTemp[3])\r\n myLines.append(nextLine)\r\n \r\n '''Only needed to visualize Hough Lines\r\n allLanes = image.copy()\r\n for line in myLines:\r\n x1 = line[0]\r\n y1 = line[1]\r\n x2 = line[2]\r\n y2 = line[3]\r\n cv2.line(allLanes, (x1, y1), (x2, y2), color=[255, 255, 255], thickness=2)\r\n cv2.imwrite(\"4HoughTransform.jpg\", allLanes)\r\n '''\r\n \r\n #black image\r\n output = np.zeros((480,640,3), np.uint8)\r\n for line in myLines:\r\n x1 = line[0]\r\n y1 = line[1]\r\n x2 = line[2]\r\n y2 = line[3]\r\n cv2.line(output, (x1, y1), (x2, y2), color=[255, 255, 255], thickness=2)\r\n \r\n '''no longer needed since we are only using black and white for our neural net input image\r\n gray_output = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)\r\n cv2.imwrite(\"5Output.jpg\", gray_output)\r\n '''\r\n #make ys good\r\n myLines = makeYsGood(myLines)\r\n \r\n #sort by x1\r\n myLines.sort(key=lambda x: x[0])\r\n \r\n \r\n '''\r\n ###################################################################################\r\n '''\r\n #NOW WE CHOOSE THE ALGORITHM\r\n \r\n finalEdges = algorithm1(myLines)\r\n image1 = image.copy()\r\n for line in finalEdges:\r\n x1 = line[0]\r\n y1 = line[1]\r\n x2 = line[2]\r\n y2 = line[3]\r\n cv2.line(image1, (int(x1), int(y1)), (int(x2), int(y2)), color=[255, 255, 255], thickness=2)\r\n cv2.imwrite(\"Algo1.jpg\", image1)\r\n \r\n leftAvgLine = finalEdges[0]\r\n rightAvgLine = finalEdges[1]\r\n \r\n try:\r\n leftSlope= (-1)*(leftAvgLine[3]-leftAvgLine[1])/(leftAvgLine[2]-leftAvgLine[0])\r\n except: \r\n leftSlope = 1000\r\n \r\n try:\r\n rightSlope= (-1)*(rightAvgLine[3]-rightAvgLine[1])/(rightAvgLine[2]-rightAvgLine[0])\r\n except:\r\n rightSlope = 1000\r\n \r\n return [leftSlope, rightSlope]\r\n"
]
| [
[
"numpy.array",
"numpy.zeros"
]
]
|
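The record above indexes only numpy.array and numpy.zeros, but the substance of the file is the blur -> Canny -> probabilistic-Hough slope pipeline in algo1GetSlopes. The sketch below is an illustration of that pipeline on a synthetic frame, not part of the dataset row: the frame, the edge_slopes helper name, and the grayscale conversion are assumptions added here (the dumped code feeds the blurred colour image straight to cv2.Canny), while the Hough parameters and the 1000 sentinel for vertical segments mirror the code above.

import numpy as np
import cv2

def edge_slopes(image):
    # Blur, detect edges, then fit probabilistic Hough segments with the same
    # parameters as algo1GetSlopes; return one slope per detected segment with
    # image-y flipped so that "up" is positive, 1000.0 marking vertical segments.
    blur = cv2.GaussianBlur(image, (13, 13), 0)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)   # grayscale added here for robustness
    edges = cv2.Canny(gray, 100, 300)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 40, np.array([]),
                            minLineLength=10, maxLineGap=50)
    if lines is None:
        return []
    slopes = []
    for x1, y1, x2, y2 in lines[:, 0]:
        slopes.append(1000.0 if x2 == x1 else -(y2 - y1) / (x2 - x1))
    return slopes

# Synthetic 480x640 test frame: black canvas with one bright diagonal stripe.
frame = np.zeros((480, 640, 3), np.uint8)
cv2.line(frame, (100, 400), (300, 200), color=(255, 255, 255), thickness=5)
print(edge_slopes(frame))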
rsinghlab/SCOT | [
"ddcbd998687f12c76b4a8cb605ed5435471149cc"
]
| [
"src/tlb_sinkhorn_solver.py"
]
| [
"import torch\nfrom utils_pytorch import l2_distortion, grad_l2_distortion\nimport numpy as np\n\n\nclass TLBSinkhornSolver(object):\n\n def __init__(self, nits, nits_sinkhorn, gradient=False, tol=1e-7, tol_sinkhorn=1e-7):\n self.nits = nits\n self.nits_sinkhorn = nits_sinkhorn\n self.gradient = gradient\n self.tol = tol\n self.tol_sinkhorn = tol_sinkhorn\n\n @staticmethod\n def rescale_mass_plan(pi, gamma, a, Cx, b, Cy, rho, eps):\n \"\"\"Same scaling to apply to pi and gamma\"\"\"\n mp, mg = pi.sum(), gamma.sum()\n mup, nup = torch.sum(pi, dim=1), torch.sum(pi, dim=0)\n mug, nug = torch.sum(gamma, dim=1), torch.sum(gamma, dim=0)\n s = mp * torch.sum(mug * (mug / a + 1e-10).log()) + mg * torch.sum(mup * (mup / a + 1e-10).log()) \\\n + mp * torch.sum(nug * (nug / b + 1e-10).log()) + mg * torch.sum(nup * (nup / b + 1e-10).log())\n s = rho * s + eps * (mg * torch.sum(pi * (pi / (a[:, None] * b[None, :]) + 1e-10).log())\n + mp * torch.sum(gamma * (gamma / (a[:, None] * b[None, :]) + 1e-10).log()))\n s = s + l2_distortion(pi, Cx, Cy)\n return (- s / (2 * (2 * rho + eps) * (mp * mg))).exp()\n\n @staticmethod\n def init_plan(a, b, init):\n if init is not None:\n return init\n else:\n return a[:, None] * b[None, :] / (a.sum() * b.sum()).sqrt()\n\n @staticmethod\n def translate_potential(u, v, C, a, b, rho, eps):\n c1 = (torch.sum(a * (-u / rho).exp()) + torch.sum(b * (-v / rho).exp())).log()\n c2 = (a.log()[:, None] * b.log()[None, :]\n + ((u[:, None] + v[None, :] - C) / eps)).logsumexp(dim=1).logsumexp(dim=0)\n z = (rho * eps) / (2 * rho + eps)\n k = z * (c1 - c2)\n return u + k, v + k\n\n def compute_local_cost(self, pi, a, Cx, b, Cy, rho, eps):\n mu, nu = torch.sum(pi, dim=1), torch.sum(pi, dim=0)\n A = torch.einsum('ij,j->i', Cx ** 2, mu)\n B = torch.einsum('kl,l->k', Cy ** 2, nu)\n C = torch.einsum('ij,kj->ik', Cx, torch.einsum('kl,jl->kj', Cy, pi))\n kl_mu = torch.sum(mu * (mu / a + 1e-10).log())\n kl_nu = torch.sum(nu * (nu / b + 1e-10).log())\n kl_pi = torch.sum(pi * (pi / (a[:, None] * b[None, :]) + 1e-10).log())\n return (A[:, None] + B[None, :] - 2 * C) + rho * kl_mu + rho * kl_nu + eps * kl_pi\n\n @staticmethod\n def quad_kl_div(pi, gamma, ref):\n massp, massg = pi.sum(), gamma.sum()\n return massg * torch.sum(pi * (pi / ref + 1e-10).log()) \\\n + massp * torch.sum(gamma * (gamma / ref + 1e-10).log()) - massp * massg + ref.sum() ** 2\n\n @staticmethod\n def l2_distortion(pi, gamma, Cx, Cy):\n A = torch.einsum('ij,i,j', Cx ** 2, torch.sum(pi, dim=1), torch.sum(gamma, dim=1))\n B = torch.einsum('ij,i,j', Cy ** 2, torch.sum(pi, dim=0), torch.sum(gamma, dim=0))\n C = torch.sum(torch.einsum('ij,jl->il', Cx, pi) * torch.einsum('ij,jl->il', gamma, Cy))\n return A + B - 2 * C\n\n def tlb_cost(self, pi, gamma, a, Cx, b, Cy, rho, eps):\n return l2_distortion(pi, Cx, Cy) + rho * self.quad_kl_div(torch.sum(pi, dim=1), torch.sum(gamma, dim=1), a) \\\n + rho * self.quad_kl_div(torch.sum(pi, dim=0), torch.sum(gamma, dim=0), b) \\\n + eps * self.quad_kl_div(pi, gamma, a[:, None] * b[None, :])\n\n @staticmethod\n def kl_prox_softmin(K, a, b, rho, eps):\n tau = rho / (rho + eps)\n\n def s_y(v):\n return torch.einsum('ij,j->i', K, b * v) ** (-tau)\n\n def s_x(u):\n return torch.einsum('ij,i->j', K, a * u) ** (-tau)\n\n return s_x, s_y\n\n @staticmethod\n def aprox_softmin(C, a, b, rho, eps):\n tau = rho / (rho + eps)\n\n def s_y(g):\n return - tau * eps * ((g / eps + b.log())[None, :] - C / eps).logsumexp(dim=1)\n\n def s_x(f):\n return - tau * eps * ((f / eps + a.log())[:, None] - C / 
eps).logsumexp(dim=0)\n\n return s_x, s_y\n\n def sinkhorn_procedure(self, T, u, v, a, b, rho, eps, exp_form=True):\n if u is None or v is None:\n u, v = self.translate_potential(torch.zeros_like(a), torch.zeros_like(b), T, a, b, rho, eps)\n if exp_form:\n K = (-T / eps).exp()\n exp_form = K.gt(torch.zeros_like(K)).all()\n if ~exp_form:\n del K\n if exp_form:\n u, v = (u / eps).exp(), (v / eps).exp()\n s_x, s_y = self.kl_prox_softmin(K, a, b, rho, eps)\n for j in range(self.nits_sinkhorn):\n u_prev = u.clone()\n v = s_x(u)\n u = s_y(v)\n if eps * (u.log() - u_prev.log()).abs().max().item() < 1e-7:\n break\n pi = u[:, None] * v[None, :] * K * a[:, None] * b[None, :]\n u, v = eps * u.log(), eps * v.log()\n\n if ~exp_form:\n s_x, s_y = self.aprox_softmin(T, a, b, rho, eps)\n for j in range(self.nits_sinkhorn):\n u_prev = u.clone()\n v = s_x(u)\n u = s_y(v)\n if (u - u_prev).abs().max().item() < 1e-7:\n break\n pi = ((u[:, None] + v[None, :] - T) / eps).exp() * a[:, None] * b[None, :]\n return u, v, pi\n\n def tlb_sinkhorn(self, a, Cx, b, Cy, rho, eps, init=None):\n # Initialize plan and local cost\n pi = self.init_plan(a, b, init=init)\n ug, vg, up, vp = None, None, None, None\n for i in range(self.nits):\n \n pi_prev = pi.clone()\n Tp = self.compute_local_cost(pi, a, Cx, b, Cy, rho, eps)\n mp = pi.sum()\n\n ug, vg, gamma = self.sinkhorn_procedure(Tp, ug, vg, a, b, mp * rho, mp * eps)\n gamma = (mp / gamma.sum()).sqrt() * gamma\n Tg = self.compute_local_cost(gamma, a, Cx, b, Cy, rho, eps)\n mg = gamma.sum()\n\n up, vp, pi = self.sinkhorn_procedure(Tg, up, vp, a, b, mg * rho, mg * eps)\n pi = (mg / pi.sum()).sqrt() * pi\n if (pi - pi_prev).abs().max().item() < 1e-7:\n break\n return pi, gamma\n\n def ugw_sinkhorn(self, a, Cx, b, Cy, rho, eps, init=None):\n # Initialize plan and local cost\n pi = self.init_plan(a, b, init=init)\n up, vp = None, None\n for i in range(self.nits):\n\n pi_prev = pi.clone()\n Tp = self.compute_local_cost(pi, a, Cx, b, Cy, rho, eps)\n mp = pi.sum()\n\n up, vp, pi = self.sinkhorn_procedure(Tp, up, vp, a, b, mp * rho, mp * eps)\n if (pi - pi_prev).abs().max().item() < 1e-7:\n break\n return pi\n"
]
| [
[
"torch.zeros_like",
"torch.einsum",
"torch.sum"
]
]
|
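The torch.einsum and torch.sum calls indexed for this record implement the local Gromov-type cost in compute_local_cost: a row term built from Cx squared and the row marginal, a column term from Cy squared and the column marginal, and a cross term Cx pi Cy^T. The snippet below is a standalone sketch of that einsum pattern on random data; the sizes, uniform marginals, and product-coupling initialisation are assumptions, and the rho/eps KL penalty terms that the solver adds on top are omitted here.

import torch

n, m = 5, 4
Cx = torch.rand(n, n)                  # intra-domain cost on source points
Cy = torch.rand(m, m)                  # intra-domain cost on target points
a = torch.full((n,), 1.0 / n)          # source marginal
b = torch.full((m,), 1.0 / m)          # target marginal
pi = a[:, None] * b[None, :]           # product coupling, as in init_plan

mu, nu = torch.sum(pi, dim=1), torch.sum(pi, dim=0)
A = torch.einsum('ij,j->i', Cx ** 2, mu)    # row term   <Cx^2, mu>
B = torch.einsum('kl,l->k', Cy ** 2, nu)    # column term <Cy^2, nu>
C = torch.einsum('ij,kj->ik', Cx, torch.einsum('kl,jl->kj', Cy, pi))  # cross term Cx pi Cy^T
local_cost = A[:, None] + B[None, :] - 2 * C
print(local_cost.shape)                      # torch.Size([5, 4])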
kskuchin/LinkedSV_debug | [
"c912d193ca4490581735c005a7fea1b7ec62c612"
]
| [
"scripts/cluster_weird_reads.py"
]
| [
"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport os\nimport sys\nfrom scipy.spatial import *\nfrom scipy.sparse import csr_matrix # csr_matrix\nfrom scipy.sparse.csgraph import connected_components # connected_components\nimport gc\n\ntry:\n from scripts import my_utils\nexcept ImportError:\n import my_utils\n\n\ntab = '\\t'\nendl = '\\n'\narg = sys.argv[1:]\n\nusage = 'python ' + __file__ + ' ' + '<weird_reads.txt> <out_file> <faidx_file>'\nargc = 3 \n\n\n\nclass ShortReadSupport: \n def __init__(self):\n self.tid1 = self.tid2 = self.start1 = self.end1 = self.start2 = self.end2 = -1\n self.mapq1 = self.mapq2 = 0\n \n self.read_id = self.bcd = '' \n self.hap_type = self.flag1 = self.flag2 = -1\n\n def init_from_two_lines(self, line1, line2):\n line1 = line1.strip().split(tab)\n line2 = line2.strip().split(tab)\n if len(line1) < 13:\n my_utils.myprint('ERROR! This line is less than 13 coloumns: %s' % tab.join(line1)) \n return\n \n if len(line2) < 13:\n my_utils.myprint('ERROR! This line is less than 13 coloumns: %s' % tab.join(line2)) \n return\n\n if line1[6] != line2[6]:\n my_utils.myprint('ERROR! line1 and line2 have different read id!')\n my_utils.myprint('line1: %s' % tab.join(line1))\n my_utils.myprint('line2: %s' % tab.join(line2))\n sys.exit()\n return\n \n if line1[0] != line2[0]:\n my_utils.myprint('ERROR! line1 and line2 have different tid!')\n my_utils.myprint('line1: %s' % tab.join(line1))\n my_utils.myprint('line2: %s' % tab.join(line2))\n sys.exit()\n return\n \n if int(line1[1]) > int(line2[1]):\n tmp = line1\n line1 = line2\n line2 = tmp\n\n self.tid1, self.start1, self.end1, self.mapq1 = line1[0:4]\n self.tid2, self.start2, self.end2, self.mapq2 = line2[0:4]\n\n self.tid1 = int(self.tid1)\n self.start1 = int(self.start1)\n self.end1 = int(self.end1)\n self.mapq1 = int(self.mapq1)\n\n self.tid2 = int(self.tid2)\n self.start2 = int(self.start2)\n self.end2 = int(self.end2)\n self.mapq2 = int(self.mapq2)\n\n self.flag1 = int(line1[7])\n self.flag2 = int(line2[7])\n self.read_id = line1[6]\n self.bcd = line1[4]\n self.hap_type = int(line1[5])\n\n if self.start1 > self.start2:\n my_utils.myprint('ERROR! 
start1 > start2')\n sys.exit()\n\n return\n\n def key1(self):\n return my_utils.FIX_LENGTH * self.tid1 + self.end1\n \n def key2(self):\n return my_utils.FIX_LENGTH * self.tid2 + self.start2\n\n def inner_size(self):\n return self.start2 - self.end1\n \n def output(self):\n outstring = '%s\\t%d\\t%d\\t%d\\t%s\\t%s\\t%d' % (self.tid1, self.end1, self.start2, self.start2-self.end1, self.bcd, self.read_id, self.hap_type)\n return outstring\n def output_info(self):\n outstring = '%s|%d|%d|%d|%s|%s|%d' % (self.tid1, self.end1, self.start2, self.start2-self.end1, self.bcd, self.read_id, self.hap_type)\n return outstring\n\n def pos1(self):\n return self.end1\n def pos2(self):\n return self.start2\n\ndef main():\n\n if len(arg) < argc:\n print (usage)\n sys.exit()\n\n in_weird_reads_file = arg.pop(0)\n out_file = arg.pop(0)\n faidx_file = arg.pop(0)\n\n cluster_weird_reads(in_weird_reads_file, out_file, faidx_file)\n return\n\ndef cluster_weird_reads(in_weird_reads_file, out_file, faidx_file):\n\n tid2chrname_list, chrname2tid_dict = my_utils.get_chrnames(faidx_file)\n max_distance = 300\n min_n_short_read_supp = 2\n max_n_short_read_supp = 1000\n min_sv_length = 1000\n\n my_utils.myprint('reading file: %s' % in_weird_reads_file)\n short_read_support_list35 = read_weird_reads_file(in_weird_reads_file, chrname2tid_dict, min_sv_length)\n my_utils.myprint('finished reading file: %s' % in_weird_reads_file)\n\n out_fp = open(out_file, 'w')\n out_fp.write('')\n out_fp.close()\n\n my_utils.myprint('clustering discordant reads')\n cluster_weird_reads1type(short_read_support_list35, out_file, min_n_short_read_supp, max_distance, tid2chrname_list, chrname2tid_dict, max_n_short_read_supp)\n\n return\n\ndef cluster_weird_reads1type(short_read_support_list, out_file, min_n_short_read_supp, max_distance, tid2chrname_list, chrname2tid_dict, max_n_short_read_supp):\n \n coord_list = list()\n \n for short_read_support in short_read_support_list:\n coord_list.append( (short_read_support.key1(), short_read_support.key2()) )\n\n cluster_one_region(short_read_support_list, coord_list, out_file, min_n_short_read_supp, max_distance, tid2chrname_list, chrname2tid_dict, max_n_short_read_supp)\n gc.collect()\n\n return\n\ndef cluster_one_region(short_read_support_list, coord_list, out_file, min_n_short_read_supp, max_distance, tid2chrname_list, chrname2tid_dict, max_n_short_read_supp):\n\n if len(coord_list) < 1: return\n edge_list = list()\n\n distance_buffer = max_distance * 1.415\n tree = cKDTree(coord_list, leafsize = 10000)\n\n for i in range(0, len(short_read_support_list)):\n\n if i > 0 and i % 100000 == 0: my_utils.myprint ('finished searching for %d weird reads' % i)\n\n node1 = (short_read_support_list[i].key1(), short_read_support_list[i].key2())\n index_list = tree.query_ball_point( node1, distance_buffer )\n\n if len(index_list) > max_n_short_read_supp: continue\n\n nearby_node_index_list = list()\n for j in index_list:\n if i == j: continue\n node2 = (short_read_support_list[j].key1(), short_read_support_list[j].key2())\n if abs(node1[0] - node2[0]) < max_distance and abs(node1[1] - node2[1]) < max_distance:\n nearby_node_index_list.append(j)\n\n for j in nearby_node_index_list: \n edge = (i, j) \n edge_list.append(edge)\n\n row = list()\n col = list()\n data = list()\n for edge in edge_list:\n row.append (edge[0])\n col.append (edge[1])\n data.append (1) \n\n n_node = len(short_read_support_list)\n\n my_utils.myprint ('get connected components')\n n_components, label_list, component_node_index_db = 
get_connected_components(n_node, row, col, data, False, 'weak')\n node_cluster_list = [0] * n_components\n for i in range(0, n_components):\n node_cluster_list[i] = list()\n for index in component_node_index_db[i]:\n node_cluster_list[i].append(short_read_support_list[index])\n\n my_utils.myprint ('output clusters of weird reads')\n out_fp = open(out_file, 'w')\n for i in range(0, len(node_cluster_list)): # for i-th cluster\n node_cluster = node_cluster_list[i]\n if len(node_cluster) < min_n_short_read_supp: continue\n if len(node_cluster) > max_n_short_read_supp: continue\n mean_start_pos = mean_end_pos = 0\n hap_type_cnt = [0] * 3\n output_info_string = 'SVTYPE=DEL'\n for j in range(0, len(node_cluster)):\n short_read_support = node_cluster[j]\n output_info_string += ';' + short_read_support.output_info() \n mean_start_pos += short_read_support.pos1()\n mean_end_pos += short_read_support.pos2()\n hap_type_cnt[short_read_support.hap_type] += 1\n\n num_pe_supp = len(node_cluster)\n mean_start_pos = int( 0.5 + (float(mean_start_pos)) / num_pe_supp)\n mean_end_pos = int( 0.5 + (float(mean_end_pos)) / num_pe_supp)\n \n tid = node_cluster[0].tid1\n chrom = tid2chrname_list[tid]\n if len(node_cluster) >= 5:\n flt = 'PASS'\n else:\n flt = 'LowQual'\n sv_size = mean_end_pos - mean_start_pos\n sv_type = 'DEL'\n out_fp.write('%s\\t%d\\t%d\\t%s\\t%d\\t%d\\t%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\n' % (chrom, mean_start_pos, mean_start_pos+1, chrom, mean_end_pos, mean_end_pos+1, sv_type, flt, sv_size, num_pe_supp, hap_type_cnt[0], hap_type_cnt[1], hap_type_cnt[2], output_info_string))\n\n del edge_list, row, col, data, component_node_index_db, label_list, node_cluster_list\n gc.collect()\n\n return\n\ndef get_connected_components(n_node, row, col, data, is_directed = False, connection_type = 'weak'):\n\n node_csr_matrix = csr_matrix((data, (row, col)), shape=[n_node, n_node])\n n_components, label_list = connected_components(node_csr_matrix, directed = is_directed, connection = connection_type)\n component_node_index_db = [0] * n_components\n for i in range(0, len(component_node_index_db)):\n component_node_index_db[i] = list()\n # component_node_index_db[component_id] = index of node\n for i in range(0, len(label_list)):\n component_node_index_db[label_list[i]].append(i)\n\n return n_components, label_list, component_node_index_db\n\ndef get_weird_readname_dict (chrname2tid_dict, weird_reads_file):\n\n weird_reads_fp = open(weird_reads_file, 'r')\n weird_readname_dict = dict()\n while 1:\n line = weird_reads_fp.readline()\n if not line: break\n line = line.strip().split(tab)\n if len(line) < 10: continue\n tid1 = chrname2tid_dict[line[0]]\n tid2 = chrname2tid_dict[line[2]]\n short_read_support = ShortReadSupport(line + [tid1, tid2])\n\n readname = short_read_support.aligned_read1.split('@')[0]\n weird_readname_dict[readname] = short_read_support\n\n weird_reads_fp.close()\n return weird_readname_dict\n\n\ndef read_weird_reads_file(in_weird_reads_file, chrname2tid_dict, min_sv_length):\n\n short_read_support_list35 = list()\n\n in_weird_reads_fp = open(in_weird_reads_file, 'r')\n\n while 1:\n line1 = in_weird_reads_fp.readline()\n line2 = in_weird_reads_fp.readline()\n if not line1: break\n if not line2: break\n\n short_read_support = ShortReadSupport()\n short_read_support.init_from_two_lines(line1, line2)\n\n if short_read_support.inner_size() > 45000: continue \n if short_read_support.inner_size() < min_sv_length - 200: continue\n short_read_support_list35.append(short_read_support)\n\n 
in_weird_reads_fp.close()\n\n\n return short_read_support_list35\n\n \n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"scipy.sparse.csr_matrix",
"scipy.sparse.csgraph.connected_components"
]
]
|
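The scipy.sparse.csr_matrix and connected_components calls indexed here are the clustering core of cluster_one_region / get_connected_components: k-d tree neighbour queries produce candidate edges, the edges fill a sparse adjacency matrix, and the connected components of that matrix are the read clusters. Below is a self-contained sketch of the same pattern on four toy 2-D coordinates; the points and the max_distance value are assumptions, while the buffered query radius and the per-axis distance check follow the script above.

import numpy as np
from scipy.spatial import cKDTree
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

max_distance = 300
coords = np.array([[0, 0], [100, 120], [5000, 5000], [5100, 4950]])
tree = cKDTree(coords, leafsize=10000)

row, col, data = [], [], []
for i, node in enumerate(coords):
    # Buffered radius (max_distance * ~sqrt(2)) as in the script, then an exact
    # per-axis check so both coordinates differ by less than max_distance.
    for j in tree.query_ball_point(node, max_distance * 1.415):
        if i != j and np.all(np.abs(node - coords[j]) < max_distance):
            row.append(i)
            col.append(j)
            data.append(1)

adj = csr_matrix((data, (row, col)), shape=(len(coords), len(coords)))
n_components, labels = connected_components(adj, directed=False, connection='weak')
print(n_components, labels)    # 2 clusters: [0 0 1 1]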
max0x7ba/ray | [
"bc7d2caa93ca1aef52672da166ce7173afbdbb3d"
]
| [
"python/ray/data/dataset.py"
]
| [
"import logging\nfrom typing import List, Any, Callable, Iterator, Iterable, Generic, TypeVar, \\\n Dict, Optional, Union, TYPE_CHECKING\nfrom uuid import uuid4\n\nif TYPE_CHECKING:\n import pyarrow\n import pandas\n import mars\n import modin\n import dask\n import pyspark\n import ray.util.sgd\n import torch\n import tensorflow as tf\n from ray.data.dataset_pipeline import DatasetPipeline\n\nimport collections\nimport itertools\nimport numpy as np\n\nimport ray\nfrom ray.types import ObjectRef\nfrom ray.util.annotations import DeveloperAPI, PublicAPI\nfrom ray.data.block import Block, BlockAccessor, BlockMetadata\nfrom ray.data.datasource import (Datasource, CSVDatasource, JSONDatasource,\n NumpyDatasource, ParquetDatasource)\nfrom ray.data.impl.remote_fn import cached_remote_fn\nfrom ray.data.impl.batcher import Batcher\nfrom ray.data.impl.compute import get_compute, cache_wrapper, \\\n CallableClass\nfrom ray.data.impl.progress_bar import ProgressBar\nfrom ray.data.impl.shuffle import simple_shuffle\nfrom ray.data.impl.sort import sort_impl\nfrom ray.data.impl.block_list import BlockList\nfrom ray.data.impl.lazy_block_list import LazyBlockList\nfrom ray.data.impl.arrow_block import DelegatingArrowBlockBuilder\n\nT = TypeVar(\"T\")\nU = TypeVar(\"U\")\n\n# An output type of iter_batches() determined by the batch_format parameter.\nBatchType = Union[\"pandas.DataFrame\", \"pyarrow.Table\", np.ndarray, list]\n\nlogger = logging.getLogger(__name__)\n\n\n@PublicAPI(stability=\"beta\")\nclass Dataset(Generic[T]):\n \"\"\"Implements a distributed Arrow dataset.\n\n Datasets are implemented as a list of ``ObjectRef[Block]``. The block\n also determines the unit of parallelism. The default block type is the\n ``pyarrow.Table``. Tensor objects are held in ``np.ndarray`` blocks,\n and other Arrow-incompatible objects are held in ``list`` blocks.\n\n Since Datasets are just lists of Ray object refs, they can be passed\n between Ray tasks and actors just like any other object. Datasets support\n conversion to/from several more featureful dataframe libraries\n (e.g., Spark, Dask, Modin, MARS), and are also compatible with distributed\n TensorFlow / PyTorch.\n\n Dataset supports parallel transformations such as .map(), .map_batches(),\n and simple repartition, but currently not aggregations and joins.\n \"\"\"\n\n def __init__(self, blocks: BlockList[T]):\n \"\"\"Construct a Dataset (internal API).\n\n The constructor is not part of the Dataset API. Use the ``ray.data.*``\n read methods to construct a dataset.\n \"\"\"\n self._blocks: BlockList[T] = blocks\n self._uuid = uuid4().hex\n assert isinstance(self._blocks, BlockList), self._blocks\n\n def map(self,\n fn: Union[CallableClass, Callable[[T], U]],\n *,\n compute: Optional[str] = None,\n **ray_remote_args) -> \"Dataset[U]\":\n \"\"\"Apply the given function to each record of this dataset.\n\n This is a blocking operation. Note that mapping individual records\n can be quite slow. Consider using `.map_batches()` for performance.\n\n Examples:\n >>> # Transform python objects.\n >>> ds.map(lambda x: x * 2)\n\n >>> # Transform Arrow records.\n >>> ds.map(lambda record: {\"v2\": record[\"value\"] * 2})\n\n >>> # Define a callable class that persists state across\n >>> # function invocations for efficiency.\n >>> class CachedModel:\n ... def __init__(self):\n ... self.model = init_model()\n ... def __call__(self, batch):\n ... return self.model(batch)\n\n >>> # Apply the transform in parallel on GPUs. 
Since\n >>> # compute=\"actors\", the transform will be applied on an\n >>> # autoscaling pool of Ray actors, each allocated 1 GPU by Ray.\n >>> ds.map(CachedModel, compute=\"actors\", num_gpus=1)\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n fn: The function to apply to each record, or a class type\n that can be instantiated to create such a callable.\n compute: The compute strategy, either \"tasks\" (default) to use Ray\n tasks, or \"actors\" to use an autoscaling Ray actor pool.\n ray_remote_args: Additional resource requirements to request from\n ray (e.g., num_gpus=1 to request GPUs for the map tasks).\n \"\"\"\n\n fn = cache_wrapper(fn)\n\n def transform(block: Block) -> Block:\n block = BlockAccessor.for_block(block)\n builder = DelegatingArrowBlockBuilder()\n for row in block.iter_rows():\n builder.add(fn(row))\n return builder.build()\n\n compute = get_compute(compute)\n\n return Dataset(compute.apply(transform, ray_remote_args, self._blocks))\n\n def map_batches(self,\n fn: Union[CallableClass, Callable[[BatchType], BatchType]],\n *,\n batch_size: int = None,\n compute: Optional[str] = None,\n batch_format: str = \"native\",\n **ray_remote_args) -> \"Dataset[Any]\":\n \"\"\"Apply the given function to batches of records of this dataset.\n\n This is a blocking operation.\n\n Examples:\n >>> # Transform batches in parallel.\n >>> ds.map_batches(lambda batch: [v * 2 for v in batch])\n\n >>> # Define a callable class that persists state across\n >>> # function invocations for efficiency.\n >>> class CachedModel:\n ... def __init__(self):\n ... self.model = init_model()\n ... def __call__(self, item):\n ... return self.model(item)\n\n >>> # Apply the transform in parallel on GPUs. Since\n >>> # compute=\"actors\", the transform will be applied on an\n >>> # autoscaling pool of Ray actors, each allocated 1 GPU by Ray.\n >>> ds.map_batches(\n ... CachedModel,\n ... 
batch_size=256, compute=\"actors\", num_gpus=1)\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n fn: The function to apply to each record batch, or a class type\n that can be instantiated to create such a callable.\n batch_size: Request a specific batch size, or leave unspecified\n to use entire blocks as batches.\n compute: The compute strategy, either \"tasks\" (default) to use Ray\n tasks, or \"actors\" to use an autoscaling Ray actor pool.\n batch_format: Specify \"native\" to use the native block format,\n \"pandas\" to select ``pandas.DataFrame`` as the batch format,\n or \"pyarrow\" to select ``pyarrow.Table/Tensor``.\n ray_remote_args: Additional resource requirements to request from\n ray (e.g., num_gpus=1 to request GPUs for the map tasks).\n \"\"\"\n if batch_size is not None and batch_size < 1:\n raise ValueError(\"Batch size cannot be negative or 0\")\n import pyarrow as pa\n import pandas as pd\n\n fn = cache_wrapper(fn)\n\n def transform(block: Block) -> Block:\n block = BlockAccessor.for_block(block)\n total_rows = block.num_rows()\n max_batch_size = batch_size\n if max_batch_size is None:\n max_batch_size = max(total_rows, 1)\n\n builder = DelegatingArrowBlockBuilder()\n\n for start in range(0, total_rows, max_batch_size):\n # Build a block for each batch.\n end = min(total_rows, start + max_batch_size)\n view = block.slice(start, end, copy=False)\n if batch_format == \"native\":\n pass\n elif batch_format == \"pandas\":\n view = BlockAccessor.for_block(view).to_pandas()\n elif batch_format == \"pyarrow\":\n view = BlockAccessor.for_block(view).to_arrow()\n else:\n raise ValueError(\n \"The batch format must be one of 'native', 'pandas', \"\n \"or 'pyarrow', got: {}\".format(batch_format))\n\n applied = fn(view)\n if (isinstance(applied, list) or isinstance(applied, pa.Table)\n or isinstance(applied, np.ndarray)):\n applied = applied\n elif isinstance(applied, pd.core.frame.DataFrame):\n applied = pa.Table.from_pandas(applied)\n elif isinstance(applied, pa.Tensor):\n applied = applied.to_numpy()\n else:\n raise ValueError(\"The map batches UDF returned a type \"\n f\"{type(applied)}, which is not allowed. \"\n \"The return type must be either list, \"\n \"pandas.DataFrame, np.ndarray, \"\n \"pyarrow.Tensor, or pyarrow.Table\")\n builder.add_block(applied)\n\n return builder.build()\n\n compute = get_compute(compute)\n\n return Dataset(compute.apply(transform, ray_remote_args, self._blocks))\n\n def flat_map(self,\n fn: Union[CallableClass, Callable[[T], Iterable[U]]],\n *,\n compute: Optional[str] = None,\n **ray_remote_args) -> \"Dataset[U]\":\n \"\"\"Apply the given function to each record and then flatten results.\n\n This is a blocking operation. 
Consider using ``.map_batches()`` for\n better performance (the batch size can be altered in map_batches).\n\n Examples:\n >>> ds.flat_map(lambda x: [x, x ** 2, x ** 3])\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n fn: The function to apply to each record, or a class type\n that can be instantiated to create such a callable.\n compute: The compute strategy, either \"tasks\" (default) to use Ray\n tasks, or \"actors\" to use an autoscaling Ray actor pool.\n ray_remote_args: Additional resource requirements to request from\n ray (e.g., num_gpus=1 to request GPUs for the map tasks).\n \"\"\"\n\n fn = cache_wrapper(fn)\n\n def transform(block: Block) -> Block:\n block = BlockAccessor.for_block(block)\n builder = DelegatingArrowBlockBuilder()\n for row in block.iter_rows():\n for r2 in fn(row):\n builder.add(r2)\n return builder.build()\n\n compute = get_compute(compute)\n\n return Dataset(compute.apply(transform, ray_remote_args, self._blocks))\n\n def filter(self,\n fn: Union[CallableClass, Callable[[T], bool]],\n *,\n compute: Optional[str] = None,\n **ray_remote_args) -> \"Dataset[T]\":\n \"\"\"Filter out records that do not satisfy the given predicate.\n\n This is a blocking operation. Consider using ``.map_batches()`` for\n better performance (you can implement filter by dropping records).\n\n Examples:\n >>> ds.filter(lambda x: x % 2 == 0)\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n fn: The predicate to apply to each record, or a class type\n that can be instantiated to create such a callable.\n compute: The compute strategy, either \"tasks\" (default) to use Ray\n tasks, or \"actors\" to use an autoscaling Ray actor pool.\n ray_remote_args: Additional resource requirements to request from\n ray (e.g., num_gpus=1 to request GPUs for the map tasks).\n \"\"\"\n\n fn = cache_wrapper(fn)\n\n def transform(block: Block) -> Block:\n block = BlockAccessor.for_block(block)\n builder = block.builder()\n for row in block.iter_rows():\n if fn(row):\n builder.add(row)\n return builder.build()\n\n compute = get_compute(compute)\n\n return Dataset(compute.apply(transform, ray_remote_args, self._blocks))\n\n def repartition(self, num_blocks: int) -> \"Dataset[T]\":\n \"\"\"Repartition the dataset into exactly this number of blocks.\n\n This is a blocking operation.\n\n Examples:\n >>> # Set the number of output partitions to write to disk.\n >>> ds.repartition(100).write_parquet(...)\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n num_blocks: The number of blocks.\n\n Returns:\n The repartitioned dataset.\n \"\"\"\n\n new_blocks = simple_shuffle(self._blocks, num_blocks)\n return Dataset(new_blocks)\n\n def random_shuffle(self,\n *,\n seed: Optional[int] = None,\n num_blocks: Optional[int] = None) -> \"Dataset[T]\":\n \"\"\"Randomly shuffle the elements of this dataset.\n\n This is a blocking operation similar to repartition().\n\n Examples:\n >>> # Shuffle this dataset randomly.\n >>> ds.random_shuffle()\n\n >>> # Shuffle this dataset with a fixed random seed.\n >>> ds.random_shuffle(seed=12345)\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n seed: Fix the random seed to use, otherwise one will be chosen\n based on system randomness.\n num_blocks: The number of output blocks after the shuffle, or None\n to retain the number of blocks.\n\n Returns:\n The shuffled dataset.\n \"\"\"\n\n new_blocks = simple_shuffle(\n self._blocks,\n num_blocks or self.num_blocks(),\n random_shuffle=True,\n random_seed=seed)\n return 
Dataset(new_blocks)\n\n def split(self,\n n: int,\n *,\n equal: bool = False,\n locality_hints: List[Any] = None) -> List[\"Dataset[T]\"]:\n \"\"\"Split the dataset into ``n`` disjoint pieces.\n\n This returns a list of sub-datasets that can be passed to Ray tasks\n and actors and used to read the dataset records in parallel.\n\n Examples:\n >>> # Split up a dataset to process over `n` worker actors.\n >>> shards = ds.split(len(workers), locality_hints=workers)\n >>> for shard, worker in zip(shards, workers):\n ... worker.consume.remote(shard)\n\n Time complexity: O(1)\n\n See also: ``Dataset.split_at_indices``\n\n Args:\n n: Number of child datasets to return.\n equal: Whether to guarantee each split has an equal\n number of records. This may drop records if they cannot be\n divided equally among the splits.\n locality_hints: A list of Ray actor handles of size ``n``. The\n system will try to co-locate the blocks of the ith dataset\n with the ith actor to maximize data locality.\n\n Returns:\n A list of ``n`` disjoint dataset splits.\n \"\"\"\n if n <= 0:\n raise ValueError(f\"The number of splits {n} is not positive.\")\n\n if n > self.num_blocks() and equal:\n raise NotImplementedError(\n f\"The number of splits {n} > the number of dataset blocks \"\n f\"{self.num_blocks()}, yet an equal split was requested.\")\n\n if locality_hints and len(locality_hints) != n:\n raise ValueError(\n f\"The length of locality_hints {len(locality_hints)} \"\n \"doesn't equal the number of splits {n}.\")\n\n # TODO(ekl) we could do better than truncation here. This could be a\n # problem if block sizes are very skewed.\n def equalize(splits: List[Dataset[T]]) -> List[Dataset[T]]:\n if not equal:\n return splits\n lower_bound = min([s.count() for s in splits])\n assert lower_bound > 0, splits\n return [s.limit(lower_bound) for s in splits]\n\n block_refs = list(self._blocks)\n metadata_mapping = {\n b: m\n for b, m in zip(self._blocks, self._blocks.get_metadata())\n }\n\n if locality_hints is None:\n return equalize([\n Dataset(\n BlockList(\n list(blocks), [metadata_mapping[b] for b in blocks]))\n for blocks in np.array_split(block_refs, n)\n ])\n\n # If the locality_hints is set, we use a two-round greedy algorithm\n # to co-locate the blocks with the actors based on block\n # and actor's location (node_id).\n #\n # The split algorithm tries to allocate equally-sized blocks regardless\n # of locality. 
Thus we first calculate the expected number of blocks\n # for each split.\n #\n # In the first round, for each actor, we look for all blocks that\n # match the actor's node_id, then allocate those matched blocks to\n # this actor until we reach the limit(expected number).\n #\n # In the second round: fill each actor's allocation with\n # remaining unallocated blocks until we reach the limit.\n\n ray.wait(block_refs, num_returns=len(block_refs))\n\n def build_allocation_size_map(num_blocks: int,\n actors: List[Any]) -> Dict[Any, int]:\n \"\"\"Given the total number of blocks and a list of actors, calcuate\n the expected number of blocks to allocate for each actor.\n \"\"\"\n num_actors = len(actors)\n num_blocks_per_actor = num_blocks // num_actors\n num_blocks_left = num_blocks - num_blocks_per_actor * n\n num_blocks_by_actor = {}\n for i, actor in enumerate(actors):\n num_blocks_by_actor[actor] = num_blocks_per_actor\n if i < num_blocks_left:\n num_blocks_by_actor[actor] += 1\n return num_blocks_by_actor\n\n def build_block_refs_by_node_id(blocks: List[ObjectRef[Block]]\n ) -> Dict[str, List[ObjectRef[Block]]]:\n \"\"\"Build the reverse index from node_id to block_refs. For\n simplicity, if the block is stored on multiple nodes we\n only pick the first one.\n \"\"\"\n block_ref_locations = ray.experimental.get_object_locations(blocks)\n block_refs_by_node_id = collections.defaultdict(list)\n for block_ref in blocks:\n node_ids = block_ref_locations.get(block_ref, {}).get(\n \"node_ids\", [])\n node_id = node_ids[0] if node_ids else None\n block_refs_by_node_id[node_id].append(block_ref)\n return block_refs_by_node_id\n\n def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]:\n \"\"\"Build a map from a actor to its node_id.\n \"\"\"\n actors_state = ray.state.actors()\n return {\n actor: actors_state.get(actor._actor_id.hex(), {}).get(\n \"Address\", {}).get(\"NodeID\")\n for actor in actors\n }\n\n # expected number of blocks to be allocated for each actor\n expected_block_count_by_actor = build_allocation_size_map(\n len(block_refs), locality_hints)\n # the reverse index from node_id to block_refs\n block_refs_by_node_id = build_block_refs_by_node_id(block_refs)\n # the map from actor to its node_id\n node_id_by_actor = build_node_id_by_actor(locality_hints)\n\n allocation_per_actor = collections.defaultdict(list)\n\n # In the first round, for each actor, we look for all blocks that\n # match the actor's node_id, then allocate those matched blocks to\n # this actor until we reach the limit(expected number)\n for actor in locality_hints:\n node_id = node_id_by_actor[actor]\n matching_blocks = block_refs_by_node_id[node_id]\n expected_block_count = expected_block_count_by_actor[actor]\n allocation = []\n while matching_blocks and len(allocation) < expected_block_count:\n allocation.append(matching_blocks.pop())\n allocation_per_actor[actor] = allocation\n\n # In the second round: fill each actor's allocation with\n # remaining unallocated blocks until we reach the limit\n remaining_block_refs = list(\n itertools.chain.from_iterable(block_refs_by_node_id.values()))\n for actor in locality_hints:\n while len(allocation_per_actor[actor]\n ) < expected_block_count_by_actor[actor]:\n allocation_per_actor[actor].append(remaining_block_refs.pop())\n\n assert len(remaining_block_refs) == 0, len(remaining_block_refs)\n\n return equalize([\n Dataset(\n BlockList(\n allocation_per_actor[actor],\n [metadata_mapping[b]\n for b in allocation_per_actor[actor]]))\n for actor in locality_hints\n 
])\n\n def split_at_indices(self, indices: List[int]) -> List[\"Dataset[T]\"]:\n \"\"\"Split the dataset at the given indices (like np.split).\n\n Examples:\n >>> d1, d2, d3 = ray.data.range(10).split_at_indices([2, 5])\n >>> d1.take()\n [0, 1]\n >>> d2.take()\n [2, 3, 4]\n >>> d3.take()\n [5, 6, 7, 8, 9]\n\n Time complexity: O(num splits)\n\n See also: ``Dataset.split``\n\n Args:\n indices: List of sorted integers which indicate where the dataset\n will be split. If an index exceeds the length of the dataset,\n an empty dataset will be returned.\n\n Returns:\n The dataset splits.\n \"\"\"\n\n if len(indices) < 1:\n raise ValueError(\"indices must be at least of length 1\")\n if sorted(indices) != indices:\n raise ValueError(\"indices must be sorted\")\n if indices[0] < 0:\n raise ValueError(\"indices must be positive\")\n\n rest = self\n splits = []\n prev = 0\n for i in indices:\n first, rest = rest._split(i - prev, return_right_half=True)\n prev = i\n splits.append(first)\n splits.append(rest)\n\n return splits\n\n def union(self, *other: List[\"Dataset[T]\"]) -> \"Dataset[T]\":\n \"\"\"Combine this dataset with others of the same type.\n\n Args:\n other: List of datasets to combine with this one. The datasets\n must have the same schema as this dataset, otherwise the\n behavior is undefined.\n\n Returns:\n A new dataset holding the union of their data.\n \"\"\"\n\n blocks: List[ObjectRef[Block]] = []\n metadata: List[BlockMetadata] = []\n pending_blocks: List[Callable[[], ObjectRef[Block]]] = []\n pending_metadata: List[BlockMetadata] = []\n\n datasets = [self] + list(other)\n for ds in datasets:\n bl = ds._blocks\n if isinstance(bl, LazyBlockList):\n for block, meta in zip(bl._blocks, bl._metadata):\n blocks.append(block)\n metadata.append(meta)\n lim = len(bl._blocks)\n for call, meta in zip(bl._calls[lim:], bl._metadata[lim:]):\n pending_blocks.append(call)\n pending_metadata.append(meta)\n else:\n assert isinstance(bl, BlockList), bl\n blocks.extend(list(bl._blocks))\n metadata.extend(bl.get_metadata())\n\n result = LazyBlockList([], [])\n result._calls = ([None] * len(blocks)) + pending_blocks\n result._blocks = blocks\n result._metadata = metadata + pending_metadata\n\n assert len(result._calls) == len(result._metadata), result\n assert len(result._blocks) <= len(result._calls), result\n return Dataset(result)\n\n def sort(self,\n key: Union[None, str, List[str], Callable[[T], Any]] = None,\n descending: bool = False) -> \"Dataset[T]\":\n \"\"\"Sort the dataset by the specified key column or key function.\n (experimental support)\n\n This is a blocking operation.\n\n Examples:\n >>> # Sort using the entire record as the key.\n >>> ds.sort()\n\n >>> # Sort by a single column in descending order.\n >>> ds.sort(\"field1\", descending=True)\n\n >>> # Sort by a key function.\n >>> ds.sort(lambda record: record[\"field1\"] % 100)\n\n >>> # Sort by multiple columns (not yet supported).\n >>> ds.sort([(\"field1\", \"ascending\"), (\"field2\", \"descending)])\n\n Time complexity: O(dataset size * log(dataset size / parallelism))\n\n Args:\n key:\n - For Arrow tables, key must be a single column name.\n - For datasets of Python objects, key can be either a lambda\n function that returns a comparison key to sort by, or None\n to sort by the original value.\n descending: Whether to sort in descending order.\n\n Returns:\n A new, sorted dataset.\n \"\"\"\n return Dataset(sort_impl(self._blocks, key, descending))\n\n def limit(self, limit: int) -> \"Dataset[T]\":\n \"\"\"Limit the dataset to 
the first number of records specified.\n\n Examples:\n >>> ds.limit(100).map(lambda x: x * 2).take()\n\n Time complexity: O(limit specified)\n\n Args:\n limit: The size of the dataset to truncate to.\n\n Returns:\n The truncated dataset.\n \"\"\"\n\n left, _ = self._split(limit, return_right_half=False)\n return left\n\n def take(self, limit: int = 20) -> List[T]:\n \"\"\"Take up to the given number of records from the dataset.\n\n Time complexity: O(limit specified)\n\n Args:\n limit: The max number of records to return.\n\n Returns:\n A list of up to ``limit`` records from the dataset.\n \"\"\"\n output = []\n for row in self.iter_rows():\n output.append(row)\n if len(output) >= limit:\n break\n return output\n\n def show(self, limit: int = 20) -> None:\n \"\"\"Print up to the given number of records from the dataset.\n\n Time complexity: O(limit specified)\n\n Args:\n limit: The max number of records to print.\n \"\"\"\n for row in self.take(limit):\n print(row)\n\n def count(self) -> int:\n \"\"\"Count the number of records in the dataset.\n\n Time complexity: O(dataset size / parallelism), O(1) for parquet\n\n Returns:\n The number of records in the dataset.\n \"\"\"\n\n # For parquet, we can return the count directly from metadata.\n meta_count = self._meta_count()\n if meta_count is not None:\n return meta_count\n\n get_num_rows = cached_remote_fn(_get_num_rows)\n\n return sum(\n ray.get([get_num_rows.remote(block) for block in self._blocks]))\n\n def sum(self) -> int:\n \"\"\"Sum up the elements of this dataset.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n The sum of the records in the dataset.\n \"\"\"\n\n get_sum = cached_remote_fn(_get_sum)\n\n return sum(ray.get([get_sum.remote(block) for block in self._blocks]))\n\n def schema(self) -> Union[type, \"pyarrow.lib.Schema\"]:\n \"\"\"Return the schema of the dataset.\n\n For datasets of Arrow records, this will return the Arrow schema.\n For datasets of Python objects, this returns their Python type.\n\n Time complexity: O(1)\n\n Returns:\n The Python type or Arrow schema of the records, or None if the\n schema is not known.\n \"\"\"\n metadata = self._blocks.get_metadata()\n # Some blocks could be empty, in which case we cannot get their schema.\n # TODO(ekl) validate schema is the same across different blocks.\n for m in metadata:\n if m.schema:\n return m.schema\n return None\n\n def num_blocks(self) -> int:\n \"\"\"Return the number of blocks of this dataset.\n\n Time complexity: O(1)\n\n Returns:\n The number of blocks of this dataset.\n \"\"\"\n return len(self._blocks)\n\n def size_bytes(self) -> int:\n \"\"\"Return the in-memory size of the dataset.\n\n Time complexity: O(1)\n\n Returns:\n The in-memory size of the dataset in bytes, or None if the\n in-memory size is not known.\n \"\"\"\n metadata = self._blocks.get_metadata()\n if not metadata or metadata[0].size_bytes is None:\n return None\n return sum(m.size_bytes for m in metadata)\n\n def input_files(self) -> List[str]:\n \"\"\"Return the list of input files for the dataset.\n\n Time complexity: O(num input files)\n\n Returns:\n The list of input files used to create the dataset, or an empty\n list if the input files is not known.\n \"\"\"\n metadata = self._blocks.get_metadata()\n files = set()\n for m in metadata:\n for f in m.input_files:\n files.add(f)\n return list(files)\n\n def write_parquet(self,\n path: str,\n *,\n filesystem: Optional[\"pyarrow.fs.FileSystem\"] = None,\n **arrow_parquet_args) -> None:\n \"\"\"Write the dataset to 
parquet.\n\n This is only supported for datasets convertible to Arrow records.\n To control the number of files, use ``.repartition()``.\n\n The format of the output files will be {uuid}_{block_idx}.parquet,\n where ``uuid`` is an unique id for the dataset.\n\n Examples:\n >>> ds.write_parquet(\"s3://bucket/path\")\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n path: The path to the destination root directory, where Parquet\n files will be written to.\n filesystem: The filesystem implementation to write to.\n arrow_parquet_args: Options to pass to\n pyarrow.parquet.write_table(), which is used to write out each\n block to a file.\n \"\"\"\n self.write_datasource(\n ParquetDatasource(),\n path=path,\n dataset_uuid=self._uuid,\n filesystem=filesystem,\n **arrow_parquet_args)\n\n def write_json(self,\n path: str,\n *,\n filesystem: Optional[\"pyarrow.fs.FileSystem\"] = None,\n **pandas_json_args) -> None:\n \"\"\"Write the dataset to json.\n\n This is only supported for datasets convertible to Arrow records.\n To control the number of files, use ``.repartition()``.\n\n The format of the output files will be {self._uuid}_{block_idx}.json,\n where ``uuid`` is an unique id for the dataset.\n\n Examples:\n >>> ds.write_json(\"s3://bucket/path\")\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n path: The path to the destination root directory, where json\n files will be written to.\n filesystem: The filesystem implementation to write to.\n pandas_json_args: These args will be passed to\n pandas.DataFrame.to_json(), which we use under the hood to\n write out each Datasets block. These\n are dict(orient=\"records\", lines=True) by default.\n \"\"\"\n self.write_datasource(\n JSONDatasource(),\n path=path,\n dataset_uuid=self._uuid,\n filesystem=filesystem,\n **pandas_json_args)\n\n def write_csv(self,\n path: str,\n *,\n filesystem: Optional[\"pyarrow.fs.FileSystem\"] = None,\n **arrow_csv_args) -> None:\n \"\"\"Write the dataset to csv.\n\n This is only supported for datasets convertible to Arrow records.\n To control the number of files, use ``.repartition()``.\n\n The format of the output files will be {uuid}_{block_idx}.csv, where\n ``uuid`` is an unique id for the dataset.\n\n Examples:\n >>> ds.write_csv(\"s3://bucket/path\")\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n path: The path to the destination root directory, where csv\n files will be written to.\n filesystem: The filesystem implementation to write to.\n arrow_csv_args: Other CSV write options to pass to pyarrow.\n \"\"\"\n self.write_datasource(\n CSVDatasource(),\n path=path,\n dataset_uuid=self._uuid,\n filesystem=filesystem,\n **arrow_csv_args)\n\n def write_numpy(\n self,\n path: str,\n *,\n filesystem: Optional[\"pyarrow.fs.FileSystem\"] = None) -> None:\n \"\"\"Write the dataset to npy files.\n\n This is only supported for datasets of Tensor records.\n To control the number of files, use ``.repartition()``.\n\n The format of the output files will be {self._uuid}_{block_idx}.npy,\n where ``uuid`` is an unique id for the dataset.\n\n Examples:\n >>> ds.write_numpy(\"s3://bucket/path\")\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n path: The path to the destination root directory, where npy\n files will be written to.\n filesystem: The filesystem implementation to write to.\n \"\"\"\n self.write_datasource(\n NumpyDatasource(),\n path=path,\n dataset_uuid=self._uuid,\n filesystem=filesystem)\n\n def write_datasource(self, datasource: Datasource[T],\n **write_args) 
-> None:\n \"\"\"Write the dataset to a custom datasource.\n\n Examples:\n >>> ds.write_datasource(CustomDatasourceImpl(...))\n\n Time complexity: O(dataset size / parallelism)\n\n Args:\n datasource: The datasource to write to.\n write_args: Additional write args to pass to the datasource.\n \"\"\"\n\n write_results = datasource.do_write(self._blocks,\n self._blocks.get_metadata(),\n **write_args)\n progress = ProgressBar(\"Write Progress\", len(write_results))\n try:\n progress.block_until_complete(write_results)\n datasource.on_write_complete(ray.get(write_results))\n except Exception as e:\n datasource.on_write_failed(write_results, e)\n raise\n finally:\n progress.close()\n\n def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[T]:\n \"\"\"Return a local row iterator over the dataset.\n\n Examples:\n >>> for i in ray.data.range(1000000).iter_rows():\n ... print(i)\n\n Time complexity: O(1)\n\n Args:\n prefetch_blocks: The number of blocks to prefetch ahead of the\n current block during the scan.\n\n Returns:\n A local iterator over the entire dataset.\n \"\"\"\n for batch in self.iter_batches(\n prefetch_blocks=prefetch_blocks, batch_format=\"native\"):\n batch = BlockAccessor.for_block(batch)\n for row in batch.iter_rows():\n yield row\n\n def iter_batches(self,\n *,\n prefetch_blocks: int = 0,\n batch_size: int = None,\n batch_format: str = \"native\",\n drop_last: bool = False) -> Iterator[BatchType]:\n \"\"\"Return a local batched iterator over the dataset.\n\n Examples:\n >>> for batch in ray.data.range(1000000).iter_batches():\n ... print(batch)\n\n Time complexity: O(1)\n\n Args:\n prefetch_blocks: The number of blocks to prefetch ahead of the\n current block during the scan.\n batch_size: Record batch size, or None to let the system pick.\n batch_format: The format in which to return each batch.\n Specify \"native\" to use the current block format, \"pandas\" to\n select ``pandas.DataFrame`` or \"pyarrow\" to select\n ``pyarrow.Table/Tensor``. Default is \"native\".\n drop_last: Whether to drop the last batch if it's incomplete.\n\n Returns:\n A list of iterators over record batches.\n \"\"\"\n\n def sliding_window(iterable: Iterable, n: int):\n \"\"\"Creates an iterator consisting of n-width sliding windows over\n iterable. The sliding windows are constructed lazily such that an\n element on the base iterator (iterable) isn't consumed until the\n first sliding window containing that element is reached.\n\n Args:\n iterable: The iterable on which the sliding window will be\n created.\n n: The width of the sliding window.\n\n Returns:\n An iterator of n-width windows over iterable.\n \"\"\"\n iters = itertools.tee(iter(iterable), n)\n for i in range(1, n):\n for it in iters[i:]:\n next(it, None)\n return zip(*iters)\n\n def format_batch(batch: Block, format: str) -> BatchType:\n if batch_format == \"native\":\n return batch\n elif batch_format == \"pandas\":\n batch = BlockAccessor.for_block(batch)\n return batch.to_pandas()\n elif batch_format == \"pyarrow\":\n batch = BlockAccessor.for_block(batch)\n return batch.to_arrow()\n else:\n raise ValueError(\n f\"The given batch format: {batch_format} \"\n f\"is invalid. 
Supported batch type: {BatchType}\")\n\n batcher = Batcher(batch_size=batch_size)\n\n def batch_block(block: ObjectRef[Block]):\n block = ray.get(block)\n batcher.add(block)\n while batcher.has_batch():\n yield format_batch(batcher.next_batch(), batch_format)\n\n block_window = [] # Handle empty sliding window gracefully.\n for block_window in sliding_window(self._blocks, prefetch_blocks + 1):\n block_window = list(block_window)\n ray.wait(block_window, num_returns=1, fetch_local=True)\n yield from batch_block(block_window[0])\n\n # Consume remainder of final block window.\n for block in block_window[1:]:\n yield from batch_block(block)\n\n # Yield any remainder batches.\n if batcher.has_any() and not drop_last:\n yield format_batch(batcher.next_batch(), batch_format)\n\n def to_torch(self,\n *,\n label_column: str,\n feature_columns: Optional[List[str]] = None,\n label_column_dtype: Optional[\"torch.dtype\"] = None,\n feature_column_dtypes: Optional[List[\"torch.dtype\"]] = None,\n batch_size: int = 1,\n prefetch_blocks: int = 0,\n drop_last: bool = False) -> \\\n \"torch.utils.data.IterableDataset\":\n \"\"\"Return a Torch IterableDataset over this dataset.\n\n It is recommended to use the returned ``IterableDataset`` directly\n instead of passing it into a torch ``DataLoader``.\n\n Each element in IterableDataset will be a tuple consisting of 2\n elements. The first item is a list of the feature tensors. The\n second item is the label tensor. Each tensor will be of shape (N,\n 1), where N is the ``batch_size`` used by the DataLoader.\n\n Note that you probably want to call ``.split()`` on this dataset if\n there are to be multiple Torch workers consuming the data.\n\n Time complexity: O(1)\n\n Args:\n label_column (str): The name of the column used as the label\n (second element of the output list).\n feature_columns (Optional[List[str]]): The names of the columns\n to use as the features. If None, then use all columns\n except the label columns as the features.\n label_column_dtype (Optional[torch.dtype]): The torch dtype to\n use for the label column. If None, then automatically infer\n the dtype.\n feature_column_dtypes (Optional[List[torch.dtype]]): The dtypes\n to use for the feature columns. The len of this list must\n be equal to the len of ``feature_columns``. If None,\n then automatically infer the dtype.\n batch_size (int): How many samples per batch to yield at a time.\n Defaults to 1.\n prefetch_blocks (int): The number of blocks to prefetch ahead of\n the current block during the scan.\n drop_last (bool): Set to True to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If\n False and the size of dataset is not divisible by the batch\n size, then the last batch will be smaller. 
Defaults to False.\n\n Returns:\n A torch IterableDataset.\n \"\"\"\n import torch\n\n from ray.data.impl.torch_iterable_dataset import \\\n TorchIterableDataset\n\n if feature_columns and feature_column_dtypes:\n if len(feature_columns) != len(feature_column_dtypes):\n raise ValueError(\"The lengths of `feature_columns` \"\n f\"({len(feature_columns)}) and \"\n f\"`feature_column_dtypes` (\"\n f\"{len(feature_column_dtypes)}) do not \"\n \"match!\")\n\n def make_generator():\n for batch in self.iter_batches(\n batch_size=batch_size,\n batch_format=\"pandas\",\n prefetch_blocks=prefetch_blocks,\n drop_last=drop_last):\n label_vals = batch.pop(label_column).values\n label_tensor = torch.as_tensor(\n label_vals, dtype=label_column_dtype)\n label_tensor = label_tensor.view(-1, 1)\n\n feature_tensor = []\n if feature_columns:\n batch = batch[feature_columns]\n\n if feature_column_dtypes:\n dtypes = feature_column_dtypes\n else:\n dtypes = [None] * len(batch.columns)\n\n for col, dtype in zip(batch.columns, dtypes):\n col_vals = batch[col].values\n t = torch.as_tensor(col_vals, dtype=dtype)\n t = t.view(-1, 1)\n feature_tensor.append(t)\n\n yield (feature_tensor, label_tensor)\n\n return TorchIterableDataset(make_generator)\n\n def to_tf(self,\n *,\n label_column: str,\n output_signature: List[\"tf.TypeSpec\"],\n feature_columns: Optional[List[str]] = None,\n prefetch_blocks: int = 0,\n batch_size: int = 1) -> \"tf.data.Dataset\":\n \"\"\"Return a TF Dataset over this dataset.\n\n The TF Dataset will be created from the generator returned by the\n ``iter_batches`` method. ``prefetch_blocks`` and ``batch_size``\n arguments will be passed to that method.\n\n This is only supported for datasets convertible to Arrow records.\n\n Requires all datasets to have the same columns.\n\n Note that you probably want to call ``.split()`` on this dataset if\n there are to be multiple TensorFlow workers consuming the data.\n\n The elements generated must be compatible with the given\n ``output_signature`` argument (same as in\n ``tf.data.Dataset.from_generator``).\n\n Time complexity: O(1)\n\n Args:\n label_column (str): The name of the column used as the label\n (second element of the output tuple).\n output_signature (List[tf.TypeSpec]): A 2-element list\n of `tf.TypeSpec` objects corresponding to (features, label).\n feature_columns (Optional[List[str]]): List of columns in datasets\n to use. If None, all columns will be used.\n prefetch_blocks: The number of blocks to prefetch ahead of the\n current block during the scan.\n batch_size: Record batch size. 
Defaults to 1.\n\n Returns:\n A tf.data.Dataset.\n \"\"\"\n\n # argument exception checking is done in from_generator\n\n try:\n import tensorflow as tf\n except ImportError:\n raise ValueError(\"tensorflow must be installed!\")\n\n def make_generator():\n for batch in self.iter_batches(\n prefetch_blocks=prefetch_blocks,\n batch_size=batch_size,\n batch_format=\"pandas\"):\n target_col = batch.pop(label_column)\n if feature_columns:\n batch = batch[feature_columns]\n # TODO(Clark): Support batches containing our extension array\n # TensorArray.\n yield batch.values, target_col.values\n\n return tf.data.Dataset.from_generator(\n make_generator, output_signature=output_signature)\n\n def to_dask(self) -> \"dask.DataFrame\":\n \"\"\"Convert this dataset into a Dask DataFrame.\n\n This is only supported for datasets convertible to Arrow records.\n\n Note that this function will set the Dask scheduler to Dask-on-Ray\n globally, via the config.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A Dask DataFrame created from this dataset.\n \"\"\"\n import dask\n import dask.dataframe as dd\n from ray.util.client.common import ClientObjectRef\n from ray.util.dask import ray_dask_get\n\n dask.config.set(scheduler=ray_dask_get)\n\n @dask.delayed\n def block_to_df(block: Block):\n block = BlockAccessor.for_block(block)\n if isinstance(block, (ray.ObjectRef, ClientObjectRef)):\n raise ValueError(\n \"Dataset.to_dask() must be used with Dask-on-Ray, please \"\n \"set the Dask scheduler to ray_dask_get (located in \"\n \"ray.util.dask).\")\n return block.to_pandas()\n\n # TODO(Clark): Give Dask a Pandas-esque schema via the Pyarrow schema,\n # once that's implemented.\n ddf = dd.from_delayed([block_to_df(block) for block in self._blocks])\n return ddf\n\n def to_mars(self) -> \"mars.DataFrame\":\n \"\"\"Convert this dataset into a MARS dataframe.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A MARS dataframe created from this dataset.\n \"\"\"\n raise NotImplementedError # P1\n\n def to_modin(self) -> \"modin.DataFrame\":\n \"\"\"Convert this dataset into a Modin dataframe.\n\n This works by first converting this dataset into a distributed set of\n Pandas dataframes (using ``.to_pandas()``). Please see caveats there.\n Then the individual dataframes are used to create the modin DataFrame\n using\n ``modin.distributed.dataframe.pandas.partitions.from_partitions()``.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. 
For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or ``.get_blocks()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A Modin dataframe created from this dataset.\n \"\"\"\n\n from modin.distributed.dataframe.pandas.partitions import (\n from_partitions)\n pd_objs = self.to_pandas()\n return from_partitions(pd_objs, axis=0)\n\n def to_spark(self,\n spark: \"pyspark.sql.SparkSession\") -> \"pyspark.sql.DataFrame\":\n \"\"\"Convert this dataset into a Spark dataframe.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A Spark dataframe created from this dataset.\n \"\"\"\n import raydp\n core_worker = ray.worker.global_worker.core_worker\n locations = [\n core_worker.get_owner_address(block)\n for block in self.get_blocks()\n ]\n return raydp.spark.ray_dataset_to_spark_dataframe(\n spark, self.schema(), self.get_blocks(), locations)\n\n def to_pandas(self) -> List[ObjectRef[\"pandas.DataFrame\"]]:\n \"\"\"Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or ``.get_blocks()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote Pandas dataframes created from this dataset.\n \"\"\"\n\n block_to_df = cached_remote_fn(_block_to_df)\n return [block_to_df.remote(block) for block in self._blocks]\n\n def to_numpy(self) -> List[ObjectRef[np.ndarray]]:\n \"\"\"Convert this dataset into a distributed set of NumPy ndarrays.\n\n This is only supported for datasets convertible to NumPy ndarrays.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or ``.get_blocks()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote NumPy ndarrays created from this dataset.\n \"\"\"\n\n block_to_ndarray = cached_remote_fn(_block_to_ndarray)\n return [block_to_ndarray.remote(block) for block in self._blocks]\n\n def to_arrow(self) -> List[ObjectRef[\"pyarrow.Table\"]]:\n \"\"\"Convert this dataset into a distributed set of Arrow tables.\n\n This is only supported for datasets convertible to Arrow records.\n This function is zero-copy if the existing data is already in Arrow\n format. 
Otherwise, the data will be converted to Arrow format.\n\n Time complexity: O(1) unless conversion is required.\n\n Returns:\n A list of remote Arrow tables created from this dataset.\n \"\"\"\n\n check_is_arrow = cached_remote_fn(_check_is_arrow)\n blocks: List[ObjectRef[Block]] = list(self._blocks)\n is_arrow = ray.get(check_is_arrow.remote(blocks[0]))\n\n if is_arrow:\n return blocks # Zero-copy path.\n\n block_to_arrow = cached_remote_fn(_block_to_arrow)\n return [block_to_arrow.remote(block) for block in self._blocks]\n\n def repeat(self, times: int = None) -> \"DatasetPipeline[T]\":\n \"\"\"Convert this into a DatasetPipeline by looping over this dataset.\n\n Transformations prior to the call to ``repeat()`` are evaluated once.\n Transformations done on the returned pipeline are evaluated on each\n loop of the pipeline over the base dataset.\n\n Examples:\n >>> # Infinite pipeline of numbers [0, 5)\n >>> ray.data.range(5).repeat().take()\n [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...]\n\n >>> # Can apply transformations to the pipeline.\n >>> ray.data.range(5).repeat().map(lambda x: -x).take()\n [0, -1, -2, -3, -4, 0, -1, -2, -3, -4, ...]\n\n >>> # Can shuffle each epoch (dataset) in the pipeline.\n >>> ray.data.range(5).repeat().random_shuffle().take()\n [2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...]\n\n Args:\n times: The number of times to loop over this dataset, or None\n to repeat indefinitely.\n \"\"\"\n from ray.data.dataset_pipeline import DatasetPipeline\n\n if times is not None and times < 1:\n raise ValueError(\"`times` must be >= 1, got {}\".format(times))\n\n class Iterator:\n def __init__(self, ds: \"Dataset[T]\"):\n self._ds = ds\n self._i = 0\n\n def __next__(self) -> \"Dataset[T]\":\n if times and self._i >= times:\n raise StopIteration\n self._i += 1\n return lambda: self._ds\n\n class Iterable:\n def __init__(self, ds: \"Dataset[T]\"):\n self._ds = ds\n\n def __iter__(self):\n return Iterator(self._ds)\n\n return DatasetPipeline(Iterable(self), length=times)\n\n def pipeline(self, *, parallelism: int = 10) -> \"DatasetPipeline[T]\":\n \"\"\"Pipeline the dataset execution by splitting its blocks into groups.\n\n Transformations prior to the call to ``pipeline()`` are evaluated in\n bulk on the entire dataset. Transformations done on the returned\n pipeline are evaluated incrementally per group of blocks as data is\n read from the output of the pipeline.\n\n Pipelining execution allows for output to be read sooner without\n waiting for all transformations to fully execute, and can also improve\n efficiency if transforms use different resources (e.g., GPUs).\n\n Without pipelining::\n\n [preprocessing......]\n [inference.......]\n [write........]\n Time ----------------------------------------------------------->\n\n With pipelining::\n\n [prep1] [prep2] [prep3]\n [infer1] [infer2] [infer3]\n [write1] [write2] [write3]\n Time ----------------------------------------------------------->\n\n Examples:\n >>> # Create an inference pipeline.\n >>> ds = ray.data.read_binary_files(dir)\n >>> pipe = ds.pipeline(parallelism=10).map(infer)\n DatasetPipeline(num_stages=2, length=40)\n\n >>> # The higher the stage parallelism, the shorter the pipeline.\n >>> pipe = ds.pipeline(parallelism=20).map(infer)\n DatasetPipeline(num_stages=2, length=20)\n\n >>> # Outputs can be incrementally read from the pipeline.\n >>> for item in pipe.iter_rows():\n ... 
print(item)\n\n Args:\n parallelism: The parallelism (number of blocks) per stage.\n Increasing parallelism increases pipeline throughput, but also\n increases the latency to initial output, since it decreases the\n length of the pipeline. Setting this to infinity effectively\n disables pipelining.\n \"\"\"\n from ray.data.dataset_pipeline import DatasetPipeline\n\n class Iterator:\n def __init__(self, splits):\n self._splits = splits.copy()\n\n def __next__(self) -> \"Dataset[T]\":\n if not self._splits:\n raise StopIteration\n\n blocks = self._splits.pop(0)\n\n def gen():\n return Dataset(blocks)\n\n return gen\n\n class Iterable:\n def __init__(self, blocks):\n self._splits = blocks.split(split_size=parallelism)\n\n def __iter__(self):\n return Iterator(self._splits)\n\n it = Iterable(self._blocks)\n return DatasetPipeline(it, length=len(it._splits))\n\n @DeveloperAPI\n def get_blocks(self) -> List[ObjectRef[Block]]:\n \"\"\"Get a list of references to the underlying blocks of this dataset.\n\n This function can be used for zero-copy access to the data.\n\n Time complexity: O(1)\n\n Returns:\n A list of references to this dataset's blocks.\n \"\"\"\n return list(self._blocks)\n\n def _split(self, index: int,\n return_right_half: bool) -> (\"Dataset[T]\", \"Dataset[T]\"):\n get_num_rows = cached_remote_fn(_get_num_rows)\n split_block = cached_remote_fn(_split_block, num_returns=4)\n\n count = 0\n left_blocks = []\n left_metadata = []\n right_blocks = []\n right_metadata = []\n for b, m in zip(self._blocks, self._blocks.get_metadata()):\n if m.num_rows is None:\n num_rows = ray.get(get_num_rows.remote(b))\n else:\n num_rows = m.num_rows\n if count >= index:\n if not return_right_half:\n break\n right_blocks.append(b)\n right_metadata.append(m)\n elif count + num_rows < index:\n left_blocks.append(b)\n left_metadata.append(m)\n elif count + num_rows == index:\n left_blocks.append(b)\n left_metadata.append(m)\n else:\n b0, m0, b1, m1 = split_block.remote(b, m, index - count,\n return_right_half)\n left_blocks.append(b0)\n left_metadata.append(ray.get(m0))\n right_blocks.append(b1)\n right_metadata.append(ray.get(m1))\n count += num_rows\n\n left = Dataset(BlockList(left_blocks, left_metadata))\n if return_right_half:\n right = Dataset(BlockList(right_blocks, right_metadata))\n else:\n right = None\n return left, right\n\n def __repr__(self) -> str:\n schema = self.schema()\n if schema is None:\n schema_str = \"Unknown schema\"\n elif isinstance(schema, dict):\n schema_str = \"<Tensor: shape={}, dtype={}>\".format(\n schema[\"shape\"], schema[\"dtype\"])\n elif isinstance(schema, type):\n schema_str = str(schema)\n else:\n schema_str = []\n for n, t in zip(schema.names, schema.types):\n if hasattr(t, \"__name__\"):\n t = t.__name__\n schema_str.append(\"{}: {}\".format(n, t))\n schema_str = \", \".join(schema_str)\n schema_str = \"{\" + schema_str + \"}\"\n count = self._meta_count()\n if count is None:\n count = \"?\"\n return \"Dataset(num_blocks={}, num_rows={}, schema={})\".format(\n len(self._blocks), count, schema_str)\n\n def __str__(self) -> str:\n return repr(self)\n\n def _block_sizes(self) -> List[int]:\n get_num_rows = cached_remote_fn(_get_num_rows)\n return ray.get([get_num_rows.remote(b) for b in self._blocks])\n\n def _meta_count(self) -> Optional[int]:\n metadata = self._blocks.get_metadata()\n if metadata and metadata[0].num_rows is not None:\n return sum(m.num_rows for m in metadata)\n else:\n return None\n\n def _get_uuid(self) -> str:\n return self._uuid\n\n def 
_set_uuid(self, uuid: str) -> None:\n self._uuid = uuid\n\n\ndef _get_num_rows(block: Block) -> int:\n block = BlockAccessor.for_block(block)\n return block.num_rows()\n\n\ndef _get_sum(block: Block) -> int:\n block = BlockAccessor.for_block(block)\n return sum(block.iter_rows())\n\n\ndef _block_to_df(block: Block):\n block = BlockAccessor.for_block(block)\n return block.to_pandas()\n\n\ndef _block_to_ndarray(block: Block):\n block = BlockAccessor.for_block(block)\n return block.to_numpy()\n\n\ndef _block_to_arrow(block: Block):\n block = BlockAccessor.for_block(block)\n return block.to_arrow()\n\n\ndef _check_is_arrow(block: Block) -> bool:\n import pyarrow\n return isinstance(block, pyarrow.Table)\n\n\ndef _split_block(\n block: Block, meta: BlockMetadata, count: int, return_right_half: bool\n) -> (Block, BlockMetadata, Optional[Block], Optional[BlockMetadata]):\n block = BlockAccessor.for_block(block)\n logger.debug(\"Truncating last block to size: {}\".format(count))\n b0 = block.slice(0, count, copy=True)\n a0 = BlockAccessor.for_block(b0)\n m0 = BlockMetadata(\n num_rows=a0.num_rows(),\n size_bytes=a0.size_bytes(),\n schema=meta.schema,\n input_files=meta.input_files)\n if return_right_half:\n b1 = block.slice(count, block.num_rows(), copy=True)\n a1 = BlockAccessor.for_block(b1)\n m1 = BlockMetadata(\n num_rows=a1.num_rows(),\n size_bytes=a1.size_bytes(),\n schema=meta.schema,\n input_files=meta.input_files)\n else:\n b1 = None\n m1 = None\n return b0, m0, b1, m1\n"
]
| [
[
"tensorflow.data.Dataset.from_generator",
"torch.as_tensor",
"numpy.array_split"
]
]
|
cwzrad/openvino | [
"ae4bd370eac7c695bd797a31e62317d328dbe742",
"ae4bd370eac7c695bd797a31e62317d328dbe742"
]
| [
"model-optimizer/extensions/middle/ConvertGroupedStridedSlice.py",
"model-optimizer/extensions/back/ShuffleChannelPatternOptimization.py"
]
| [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\nfrom copy import deepcopy\n\nimport numpy as np\n\nfrom extensions.middle.SliceConverter import ConvertSlice\nfrom extensions.ops.split import VariadicSplit\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.graph.graph import Graph, Node, add_opoutput\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.const import Const\nfrom mo.ops.op import Op\nfrom mo.ops.squeeze import Squeeze\nfrom mo.ops.unsqueeze import Unsqueeze\n\n\nclass ConvertGroupedStridedSlice(MiddleReplacementPattern):\n \"\"\"\n This pass converts subgraphs where StridedSlices used for splitting single channel to single Split layers\n In case if StrdedSlices consume not entire tensor will be created fake outputs for Split layer\n For example:\n Let's suppose we have next graph:\n Data(1,H,W,54)\n |`---->Sslice1_out (1,H,W,(10,18))\n `---->Sslice2_out (1,H,W,(18,36))\n\n In this case StridedSlices takes only [10, 36] from input tensor in 3rd dim\n So this pass will convert this graph to the next one:\n Split(1,H,W,54)\n |`---->Fake_data (1,H,W,10)\n |`---->Sslice1_out (1,H,W,8)\n |`---->Sslice2_out (1,H,W,18)\n `----->Fake_data (1,H,W,18)\n Where Fake_data - data nodes that have not any consumers.\n \"\"\"\n\n enabled = True\n\n def run_after(self):\n return [ConvertSlice]\n\n def run_before(self):\n from extensions.middle.pass_separator import MiddleFinish\n return [MiddleFinish]\n\n def find_and_replace_pattern(self, graph: Graph):\n # Iterate over all data nodes and find all with >= 1 consumers\n for input_data in list(graph.get_data_nodes()):\n # We don't use constant data nodes\n if input_data.value is not None:\n continue\n\n input_shape = np.array(input_data.shape)\n\n # Get all StridedSlice consumers\n out_nodes = [node for node in input_data.out_nodes() if node.op == 'StridedSlice' and node.in_node(0).name == input_data.name]\n if len(out_nodes) <= 1:\n continue\n\n valid_for_replacement = True\n\n for node in out_nodes:\n if len(node.slices) != len(out_nodes[0].slices):\n valid_for_replacement = False\n\n # Detect dimension for splitting\n split_channel_dim = None\n for dim_id, s in enumerate(out_nodes[0].slices):\n l, r, stride = s.start, s.stop, s.step\n if l != 0 or r != input_shape[dim_id]:\n if split_channel_dim is None:\n split_channel_dim = dim_id\n else:\n valid_for_replacement = False\n\n if split_channel_dim is None:\n valid_for_replacement = False\n\n # split_dims contains tuples with split range and output data node\n split_dims = []\n for out_id, node in enumerate(out_nodes):\n # Check that StridedSlice op has stride eq 1 and splits only feature channel\n for id, s in enumerate(node.slices):\n l, r, stride = s.start, s.stop, s.step\n # We don't support StridedSlice with stride != 1\n if stride != 1:\n valid_for_replacement = False\n if id == split_channel_dim:\n split_dims.append((s.start, s.stop, node.out_node()))\n\n if not 
valid_for_replacement:\n continue\n\n # Check feature split intersection\n final_data_nodes_list = []\n sorted_split_dims = sorted(split_dims, key=lambda item: (item[0], item[1]))\n\n # check if we have similar StridedSlice operations with different outputs\n prev_sd = sorted_split_dims[0]\n to_remove = []\n for i in range(1, len(sorted_split_dims)):\n if sorted_split_dims[i][0] == prev_sd[0] and sorted_split_dims[i][1] == prev_sd[1] and sorted_split_dims[i][2].name != prev_sd[2].name:\n cur_node = sorted_split_dims[i][2]\n for out in cur_node.out_nodes():\n attrs = deepcopy(graph.get_edge_data(cur_node.id, out.id)[0])\n graph.remove_edge(cur_node.id, out.id)\n graph.add_edge(prev_sd[2].id, out.id, **attrs)\n to_remove.append(i)\n\n for ind in reversed(to_remove):\n sorted_split_dims.pop(ind)\n\n size_splits = []\n prev_r = 0\n for l, r, out in sorted_split_dims:\n # Split dims shouldn't intersect\n if l < prev_r:\n valid_for_replacement = False\n prev_r = r\n\n if prev_r > input_shape[split_channel_dim]:\n valid_for_replacement = False\n\n if not valid_for_replacement:\n continue\n\n prev_r = 0\n for l, r, out in sorted_split_dims:\n # Save missing tensor part\n if l > prev_r:\n shape = np.array(input_shape)\n size_splits.append(l - prev_r)\n shape[split_channel_dim] = l - prev_r\n data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})\n add_opoutput(graph, data_node.id, 0, False)\n final_data_nodes_list.append(data_node)\n\n prev_r = r\n size_splits.append(r - l)\n final_data_nodes_list.append(out)\n\n if prev_r < input_shape[split_channel_dim]:\n # Add last part of tensor\n shape = input_shape.copy()\n shape[split_channel_dim] = input_shape[split_channel_dim] - prev_r\n size_splits.append(input_shape[split_channel_dim] - prev_r)\n data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})\n add_opoutput(graph, data_node.id, 0, False)\n final_data_nodes_list.append(data_node)\n\n for node in out_nodes:\n if not np.all([x == 0 for x in node.shrink_axis_mask]):\n out_node = node.out_node()\n if np.any(node['shrink_axis_mask']):\n self.add_squeeze_for_shrink(graph, node)\n if np.any(node['new_axis_mask']):\n self.add_unsqueeze_for_new(graph, node)\n\n for i in range(len(final_data_nodes_list)):\n if final_data_nodes_list[i].name == out_node.name:\n final_data_nodes_list[i] = node.out_node()\n break\n\n # Insert Split layer and remove old StridedSlice layers\n # 1. Remove connections from input_data to StridedSlice ops\n out_data_nodes = []\n name_for_future_split = out_nodes[0].name\n for node in out_nodes:\n out_data_nodes.append(node.out_node())\n graph.remove_edge(input_data.id, node.id)\n graph.remove_edge(node.id, node.out_node().id)\n graph.remove_node(node.id)\n log.debug(\"Removed: {}\".format(node.id))\n\n # 2. 
Create Split layer and reorder outputs\n name = name_for_future_split + \"/Split\"\n axis_const = Const(graph, {'value': int64_array(split_channel_dim),\n 'name': name + '/Axis'}).create_node_with_data()\n size_splits_const = Const(graph, {'value': int64_array(size_splits),\n 'name': name + '/Sizes'}).create_node_with_data()\n split = VariadicSplit(graph, dict(name=name, out_ports_count=len(size_splits)))\n\n split.create_node_with_data(inputs=[input_data, axis_const, size_splits_const],\n data_nodes=final_data_nodes_list)\n\n @staticmethod\n def add_squeeze_for_shrink(graph: Graph, ss_node: Node):\n # add Squeeze for shrink_axis_mask\n log.info(\"StridedSlice op with shrink mask '{}' has been detected\".format(ss_node.id))\n\n if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1:\n return\n\n shape_out = ss_node.out_node().shape\n dim = np.array(range(len(ss_node['shrink_axis_mask'])))[np.array(ss_node['shrink_axis_mask'], dtype=bool)]\n ss_shape = []\n i = 0\n k = 0\n\n # Don't permute reshape if channels were squeezed\n dont_permute = graph.graph['layout'] == 'NCHW'\n if graph.graph['layout'] == 'NHWC' and ss_node['shrink_axis_mask'][-1] == 1:\n dont_permute = True\n\n while k < len(shape_out):\n if i >= len(ss_node['shrink_axis_mask']) or not ss_node['shrink_axis_mask'][i]:\n ss_shape.append(shape_out[k])\n k = k + 1\n else:\n ss_node['shrink_axis_mask'][i] = 0\n ss_shape.append(1)\n i = i + 1\n\n while i < len(ss_node['shrink_axis_mask']):\n ss_node['shrink_axis_mask'][i] = 0\n ss_shape.append(1)\n i = i + 1\n\n ss_node.out_port(0).data.set_shape(ss_shape)\n\n # insert Squeeze\n squeeze_node = Squeeze(graph, dict(name=ss_node.name + '/Squeeze_shrink',\n nchw_layout=dont_permute,\n correct_data_layout=dont_permute)).create_node()\n ss_node.out_port(0).get_connection().insert_node(squeeze_node)\n squeeze_node.out_port(0).data.set_shape(shape_out)\n\n dims_node = Const(graph, {'name': squeeze_node.id + '/Indices', 'value': int64_array(dim)}).create_node()\n dims_node.out_port(0).connect(squeeze_node.in_port(1))\n\n @staticmethod\n def add_unsqueeze_for_new(graph: Graph, ss_node: Node):\n log.info(\"StridedSlice op with new axis mask '{}' has been detected\".format(ss_node.id))\n if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1:\n return\n\n shape_out = ss_node.out_node().shape\n dim = np.array(range(len(ss_node['new_axis_mask'])))[np.array(ss_node['new_axis_mask'], dtype=bool)]\n ss_shape = []\n for i in range(0, len(ss_node['new_axis_mask'])):\n if not ss_node['new_axis_mask'][i]:\n ss_shape.append(shape_out[i])\n else:\n ss_node['new_axis_mask'][i] = 0\n\n ss_node.out_port(0).data.set_shape(ss_shape)\n\n # insert Unsqueeze\n unsqueeze_node = Unsqueeze(graph, dict(name=ss_node.name + '/Unsqueeze_new')).create_node()\n ss_node.out_port(0).get_connection().insert_node(unsqueeze_node)\n unsqueeze_node.out_port(0).data.set_shape(shape_out)\n\n dims_node = Const(graph, {'name': unsqueeze_node.id + '/Indices', 'value': int64_array(dim)}).create_node()\n dims_node.out_port(0).connect(unsqueeze_node.in_port(1))\n",
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport numpy as np\n\nfrom extensions.back.FuseTransposesSequence import FuseTransposesSequence\nfrom mo.back.replacement import BackReplacementPattern\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.graph.graph import Graph\n\n\nclass ShuffleChannelPatternOptimization(BackReplacementPattern):\n enabled = True\n force_clean_up = True\n\n def run_after(self):\n return [FuseTransposesSequence]\n\n @staticmethod\n def pattern():\n return dict(\n nodes=[\n ('t_start_order', {'type': 'Const'}),\n ('t_start_order_d', {'value': lambda value: value is not None and np.all(np.array_equal(value, [0, 2, 3, 1]))}),\n ('t_start', {'type': 'Transpose'}),\n ('t_start_d', {}),\n\n ('reshape_dim', {'type': 'Const'}),\n ('reshape_dim_d', {'value': lambda value: value is not None and value.size == 5 and np.all(value[0] == -1)}),\n ('reshape_start', {'type': 'Reshape'}),\n ('reshape_start_d', {}),\n\n ('t_5d_order', {'type': 'Const'}),\n ('t_5d_order_d', {'value': lambda value: value is not None and np.all(np.array_equal(value, [0, 1, 2, 4, 3]))}),\n ('t_5d', {'type': 'Transpose'}),\n ('t_5d_d', {}),\n\n ('reshape_1_dim', {'type': 'Const'}),\n ('reshape_1_dim_d', {'value': lambda value: value is not None and value.size == 4 and np.all(value[0] == -1)}),\n ('reshape_end', {'type': 'Reshape'}),\n ('reshape_end_d', {}),\n\n ('t_end_order', {'type': 'Const'}),\n ('t_end_order_d', {'value': lambda value: value is not None and np.all(np.array_equal(value, [0, 3, 1, 2]))}),\n ('t_end', {'type': 'Transpose'}),\n ],\n edges=[\n ('t_start_order', 't_start_order_d'),\n ('t_start_order_d', 't_start', {'in': 1}),\n ('t_start', 't_start_d'),\n\n ('reshape_dim', 'reshape_dim_d'),\n ('t_start_d', 'reshape_start', {'in': 0}),\n ('reshape_dim_d', 'reshape_start', {'in': 1}),\n ('reshape_start', 'reshape_start_d'),\n\n ('t_5d_order', 't_5d_order_d'),\n ('reshape_start_d', 't_5d', {'in': 0}),\n ('t_5d_order_d', 't_5d', {'in': 1}),\n ('t_5d', 't_5d_d'),\n\n ('reshape_1_dim', 'reshape_1_dim_d'),\n ('t_5d_d', 'reshape_end', {'in': 0}),\n ('reshape_1_dim_d', 'reshape_end', {'in': 1}),\n ('reshape_end', 'reshape_end_d'),\n\n ('t_end_order', 't_end_order_d'),\n ('reshape_end_d', 't_end', {'in': 0}),\n ('t_end_order_d', 't_end', {'in': 1}),\n ],\n )\n\n @staticmethod\n def feature_dim_splitted(short_shape, long_shape):\n return all([short_shape[i] == long_shape[i] for i in range(len(short_shape) - 1)]) and \\\n short_shape[-1] == long_shape[-1] * long_shape[-2]\n\n @staticmethod\n def replace_pattern(graph: Graph, match: dict):\n reshape_5d = match['reshape_start']\n if not ShuffleChannelPatternOptimization.feature_dim_splitted(\n short_shape=reshape_5d.in_port(0).data.get_shape(), long_shape=reshape_5d.out_port(0).data.get_shape()):\n return\n\n reshape_4d = match['reshape_end']\n if not ShuffleChannelPatternOptimization.feature_dim_splitted(\n short_shape=reshape_4d.out_port(0).data.get_shape(), 
long_shape=reshape_4d.in_port(0).data.get_shape()):\n return\n\n start = match['t_start']\n end = match['t_end']\n\n new_start = match['reshape_start']\n new_end = match['reshape_end']\n\n start_source = start.in_port(0).get_connection().get_source()\n end_connection = end.out_port(0).get_connection()\n\n new_end.out_port(0).disconnect()\n end_connection.set_source(new_end.out_port(0))\n\n start.in_port(0).disconnect()\n new_start.in_port(0).disconnect()\n\n new_start.in_port(0).connect(start_source)\n\n match['reshape_dim']['value'] = int64_array(np.take(new_start.in_port(1).data.get_value(), [0, 3, 4, 1, 2]))\n match['reshape_dim'].infer(match['reshape_dim'])\n new_start.infer(new_start)\n\n match['t_5d_order']['value'] = int64_array([0, 2, 1, 3, 4])\n match['t_5d_order'].infer(match['t_5d_order'])\n match['t_5d'].infer(match['t_5d'])\n\n match['reshape_1_dim']['value'] = int64_array(np.take(new_end.in_port(1).data.get_value(), [0, 3, 1, 2]))\n match['reshape_1_dim'].infer(match['reshape_1_dim'])\n"
]
| [
[
"numpy.all",
"numpy.any",
"numpy.array"
],
[
"numpy.all",
"numpy.array_equal"
]
]
|
Rohde-Schwarz/examples | [
"e0e776d267613b2cabda9b7196c7427aa06a2a73"
]
| [
"SignalGenerators/Python/RsSmbv_ScpiPackage/RsSmbv_FileTransferWithProgress_Example.py"
]
| [
"\"\"\"Example showing how you can transfer a big file to the instrument and from the instrument with showing the progress.\nSince the SMBV100B is quite fast on data transfer, we slow it down by waiting for 100ms between each chunk transfer (1MB)\nThis way we see the transfer progress better and we do not need a file that is so big - let's take cca 20MB.\nFor big files, use the example without the time.sleep(0.1)\"\"\"\n\nimport time\nimport numpy as np\nfrom RsSmbv import *\n\n\ndef my_transfer_handler(args):\n \"\"\"Function called each time a chunk of data is transferred\"\"\"\n total_size = args.total_size if args.total_size is not None else \"unknown\"\n print(f\"Context: '{args.context}{'with opc' if args.opc_sync else ''}', \"\n f\"chunk {args.chunk_ix}, \"\n f\"transferred {args.transferred_size} bytes, \"\n f\"total size {total_size}, \"\n f\"direction {'reading' if args.reading else 'writing'}, \"\n f\"data '{args.data}'\")\n if args.end_of_transfer:\n print('End of Transfer')\n # Slow down the transfer by 200ms to see the progress better\n time.sleep(0.1)\n\n\nRsSmbv.assert_minimum_version('4.80.2')\nsmbv = RsSmbv('TCPIP::10.112.1.73::HISLIP')\nprint(smbv.utilities.idn_string)\nsmbv.utilities.reset()\n\npc_file = r'c:\\temp\\bigFile.bin'\ninstr_file = '/var/user/bigFileInstr.bin'\npc_file_back = r'c:\\temp\\bigFileBack.bin'\n\n# Generate a random file of 20MB size\nx1mb = 1024 * 1024\nwith open(pc_file, 'wb') as file:\n for x in range(20):\n file.write(np.random.bytes(x1mb))\n\n# Send the file to the instrument with events\nsmbv.events.on_write_handler = my_transfer_handler\nsmbv.utilities.data_chunk_size = x1mb\nprint(f'Sending file to the instrument...')\nsmbv.utilities.send_file_from_pc_to_instrument(pc_file, instr_file)\nsmbv.events.on_write_handler = None\nprint(f'Receiving file from the instrument...')\nsmbv.events.on_read_handler = my_transfer_handler\nsmbv.utilities.read_file_from_instrument_to_pc(instr_file, pc_file_back)\nsmbv.events.on_read_handler = None\nsmbv.close()\n"
]
| [
[
"numpy.random.bytes"
]
]
|
rkronberg/ncnt-random-forest | [
"d5f055eaf3485a0f2ad6024f6b818ac7ee3410b4"
]
| [
"scripts/rndforest.py"
]
| [
"'''\nRandom Forest ML implementation for H adsorption on NCNTs.\nIncludes options for randomized hyperparameter search, calculation\nof SHAP values and learning/validation curve generation.\n\nauthor: Rasmus Kronberg\nemail: [email protected]\n'''\n\n# Load necessary packages\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import r2_score as r2\n\nfrom argparse import ArgumentParser\nfrom time import time\nfrom scipy import stats\nfrom joblib import dump\nfrom os import path\n\nfrom crossval import CrossValidate\nfrom utils import Utilities\n\n\ndef parse():\n\n # Parse command line arguments\n parser = ArgumentParser(\n description='Random forest and SHAP analysis of H adsorption on NCNTs')\n parser.add_argument('--drop', default=[], nargs='+',\n help='Columns not to consider as features')\n parser.add_argument('--n_estimators', default=100, type=int,\n help='Number of estimators (decision trees)')\n parser.add_argument('--max_features', type=int,\n help='Number of features considered at each split')\n parser.add_argument('--max_depth', type=int,\n help='Max. depth of any tree')\n parser.add_argument('--min_samples_split', default=2, type=int,\n help='Min. number of samples required to split node')\n parser.add_argument('--cv_folds', default=5, type=int,\n help='Number of (outer) CV folds')\n parser.add_argument('--inner_folds', type=int,\n help='Do nested CV with given number of inner folds')\n parser.add_argument('--shap', type=int,\n help='Run SHAP (arg. < 0 includes interactions)')\n parser.add_argument('--random_search', action='store_true',\n help='Do (non-nested) randomized parameter search')\n parser.add_argument('--n_iter', default=10, type=int,\n help='Number of random parameter settings to test')\n parser.add_argument('--learning_curve', type=int,\n help='Number of learning curve training set sizes')\n parser.add_argument('--validation', type=str,\n help='Generate validation curve for given argument')\n\n required = parser.add_argument_group('Required named arguments')\n required.add_argument('-i', '--input', required=True,\n help='Input DataFrame (.csv)')\n required.add_argument('-t', '--target', required=True,\n help='Name of target column in input DataFrame')\n\n args = parser.parse_args()\n if args.shap and not args.inner_folds:\n parser.error('The following arguments are required: --inner_folds')\n\n return args\n\n\ndef line():\n\n print('\\n========================\\n')\n\n\ndef main():\n\n args = parse()\n inp = args.input\n target = args.target\n exclude = args.drop\n n_estimators = args.n_estimators\n max_features = args.max_features\n min_samples_split = args.min_samples_split\n max_depth = args.max_depth\n cv_folds = args.cv_folds\n do_shap = args.shap\n inner_folds = args.inner_folds\n random_search = args.random_search\n n_iter = args.n_iter\n n_train = args.learning_curve\n name = args.validation\n\n CURRENT_PATH = path.dirname(path.realpath(__file__))\n DATA_PATH = path.normpath(path.join(CURRENT_PATH, path.dirname(inp)))\n\n line()\n print('RANDOM FOREST REGRESSOR')\n print('Current directory: %s' % CURRENT_PATH)\n print('Output directory: %s' % DATA_PATH)\n\n # Get the data\n line()\n data = pd.read_csv(inp)\n\n print('Data types in data frame:')\n print(data.dtypes)\n print('Finished reading data with %s rows, %s columns' % 
data.shape)\n\n # Describe the data\n line()\n print('Metadata:')\n print(data.describe(include='all'))\n\n # Impute missing values with -999 (nan not understood by sklearn)\n data = data.apply(pd.to_numeric,\n errors='coerce').fillna(-999, downcast='infer')\n\n # Select features to test (exclude e.g. target column)\n # Get matrix of features and target variable vector\n x = data.drop(columns=exclude)\n y = data[target]\n\n # Stratify based on target variable for balanced train-test folds\n strat = np.around(y)\n\n # Initialize RF regressor with given/default hyperparameters\n rf = RandomForestRegressor(\n n_estimators=n_estimators, max_features=max_features,\n min_samples_split=min_samples_split, max_depth=max_depth,\n random_state=rnd, n_jobs=-1)\n\n # Initialize cross-validation methods\n cv = CrossValidate(x, y, data, cv_folds, inner_folds, strat, n_iter, rnd)\n\n # Perform nested CV for unbiased generalization performance estimates\n if inner_folds is not None:\n line()\n grid = {'max_features': stats.randint(5, 26),\n 'max_depth': stats.randint(10, 56),\n 'min_samples_split': stats.randint(2, 4)}\n\n cv.nested_crossval(rf, grid, do_shap, DATA_PATH)\n\n print('Unbiased generalization performance estimation:\\n')\n print('Training set:')\n print('MAE (Train): %.4f +/- %.4f'\n % (np.mean(cv.mae_train), np.std(cv.mae_train, ddof=1)))\n print('RMSE (Train): %.4f +/- %.4f'\n % (np.mean(cv.rmse_train), np.std(cv.rmse_train, ddof=1)))\n print('R2 (Train): %.4f +/- %.4f'\n % (np.mean(cv.r2_train), np.std(cv.r2_train, ddof=1)))\n print('Test set:')\n print('MAE (Test): %.4f +/- %.4f'\n % (np.mean(cv.mae_test), np.std(cv.mae_test, ddof=1)))\n print('RMSE (Test): %.4f +/- %.4f'\n % (np.mean(cv.rmse_test), np.std(cv.rmse_test, ddof=1)))\n print('R2 (Test): %.4f +/- %.4f'\n % (np.mean(cv.r2_test), np.std(cv.r2_test, ddof=1)))\n\n # Split data avoiding train_test_split n_splits > class members ValueError\n skf = StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=rnd)\n train, test = next(skf.split(x, strat))\n\n # Sample optimal hyperparameters and override defaults (normal CV)\n if random_search:\n line()\n grid = {'max_features': stats.randint(5, 26),\n 'max_depth': stats.randint(10, 56),\n 'min_samples_split': stats.randint(2, 4)}\n\n # Tune hyperparameters using training set and cross-validation\n cv.random_search(rf, train, grid)\n\n print('\\nBest parameters: %s' % cv.best_pars)\n print('Best score: %.4f' % cv.best_score)\n\n rf.set_params(**cv.best_pars)\n\n # Initialize utility methods\n u = Utilities(x, y, strat, cv_folds, rnd, DATA_PATH)\n\n # Generate learning curve using full dataset and cross-validation\n if n_train is not None:\n line()\n u.learning_curve(rf, n_train)\n\n # Generate validation curve using training set and cross-validation\n if name is not None:\n line()\n u.validation_curve(rf, name, train)\n\n # Train, test final model\n line()\n print('Train, test final model:')\n\n rf.fit(x.iloc[train], y.iloc[train])\n y_pred_test = rf.predict(x.iloc[test])\n y_pred_train = rf.predict(x.iloc[train])\n\n print('\\nTraining set scoring:')\n print('MAE (Train): %.4f' % mae(y_pred_train, y.iloc[train]))\n print('RMSE (Train): %.4f' % np.sqrt(mse(y_pred_train, y.iloc[train])))\n print('R2 (Train): %.4f' % r2(y_pred_train, y.iloc[train]))\n print('Test set scoring:')\n print('MAE (Test): %.4f' % mae(y_pred_test, y.iloc[test]))\n print('RMSE (Test): %.4f' % np.sqrt(mse(y_pred_test, y.iloc[test])))\n print('R2 (Test): %.4f' % r2(y_pred_test, y.iloc[test]))\n\n # Pickle model\n 
dump(rf, '%s/model.pkl' % DATA_PATH)\n\n print('\\nScript executed in %.0f seconds' % (time()-t0))\n\n\nif __name__ == '__main__':\n t0 = time()\n rnd = np.random.RandomState(42)\n main()\n"
]
| [
[
"sklearn.metrics.mean_squared_error",
"sklearn.model_selection.StratifiedKFold",
"numpy.random.RandomState",
"numpy.mean",
"scipy.stats.randint",
"sklearn.metrics.mean_absolute_error",
"numpy.std",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.metrics.r2_score",
"numpy.around",
"pandas.read_csv"
]
]
|
ehu-ai/domrand | [
"f1f3b901bd29caa575760335c7cc5d5f4a8cf661"
]
| [
"domrand/sim_manager.py"
]
| [
"import numpy as np\nimport quaternion\nimport skimage\nimport time\nimport os\nimport yaml\n\n\nimport mujoco_py\nfrom mujoco_py import load_model_from_path, MjSim, MjViewer , functions\nfrom mujoco_py.modder import BaseModder, CameraModder, LightModder, MaterialModder\n\nfrom domrand.define_flags import FLAGS\nfrom domrand.utils.image import display_image, preproc_image\nfrom domrand.utils.data import get_real_cam_pos\nfrom domrand.utils.modder import TextureModder\nfrom domrand.utils.sim import look_at\nfrom domrand.utils.sim import Range, Range3D, rto3d # object type things\nfrom domrand.utils.sim import sample, sample_xyz, sample_joints, sample_light_dir, sample_quat, sample_geom_type, random_quat, jitter_quat, jitter_angle\n\n# GLOSSARY:\n# gid = geom_id\n# bid = body_id\nclass SimManager(object):\n \"\"\"Object to handle randomization of all relevant properties of Mujoco sim\"\"\"\n def __init__(self, filepath, random_params={}, gpu_render=False, gui=False, display_data=False):\n self.model = load_model_from_path(filepath)\n self.sim = MjSim(self.model)\n self.filepath = filepath\n self.gui = gui\n self.display_data = display_data\n # Take the default random params and update anything we need\n self.RANDOM_PARAMS = {}\n self.RANDOM_PARAMS.update(random_params)\n\n if gpu_render:\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, device_id=-1)\n # self.viewer = MjViewer(self.sim)\n else:\n self.viewer = None\n\n # Get start state of params to slightly jitter later\n self.START_GEOM_POS = self.model.geom_pos.copy()\n self.START_GEOM_SIZE = self.model.geom_size.copy()\n self.START_GEOM_QUAT = self.model.geom_quat.copy()\n self.START_BODY_POS = self.model.body_pos.copy()\n self.START_BODY_QUAT = self.model.body_quat.copy()\n self.START_MATID = self.model.geom_matid.copy()\n #self.FLOOR_OFFSET = self.model.body_pos[self.model.body_name2id('floor')]\n\n self.tex_modder = TextureModder(self.sim)\n self.tex_modder.whiten_materials() # ensures materials won't impact colors\n self.cam_modder = CameraModder(self.sim)\n self.light_modder = LightModder(self.sim)\n self.start_obj_pose = self.sim.data.get_joint_qpos('object:joint').copy()\n\n def get_data(self):\n self._randomize()\n self._forward()\n gt = self._get_ground_truth()\n cam = self._get_cam_frame(gt)\n return cam, gt\n\n def _forward(self):\n \"\"\"Advances simulator a step (NECESSARY TO MAKE CAMERA AND LIGHT MODDING WORK)\n And add some visualization\"\"\"\n self.sim.forward()\n if self.viewer and self.gui:\n # Get angle of camera and display it\n quat = np.quaternion(*self.model.cam_quat[0])\n ypr = quaternion.as_euler_angles(quat) * 180 / np.pi\n cam_pos = self.model.cam_pos[0]\n cam_fovy = self.model.cam_fovy[0]\n #self.viewer.add_marker(pos=cam_pos, label=\"CAM: {}{}\".format(cam_pos, ypr))\n #self.viewer.add_marker(pos=cam_pos, label=\"CAM: {}\".format(ypr))\n #self.viewer.add_marker(pos=cam_pos, label=\"CAM: {}\".format(cam_pos))\n # self.viewer.add_marker(pos=cam_pos, label=\"FOVY: {}, CAM: {}\".format(cam_fovy, cam_pos))\n # self.viewer.render()\n\n # def _get_ground_truth(self):\n # robot_gid = self.sim.model.geom_name2id('base_link')\n # obj_gid = self.sim.model.geom_name2id('object')\n\n # obj_pos_in_robot_frame = self.sim.data.geom_xpos[obj_gid] - self.sim.data.geom_xpos[robot_gid]\n # return obj_pos_in_robot_frame.astype(np.float32)\n\n def _get_ground_truth(self):\n \"\"\"\n Return x, y, and z rotation\n 3 dim total\n \"\"\"\n obj_gid = self.sim.model.geom_name2id('object')\n obj_bid = 
self.sim.model.geom_name2id('object')\n # only x and y pos needed\n # obj_world_pos = self.sim.model.geom_pos[obj_gid]\n # obj_world_quat = quaternion.as_quat_array(self.sim.model.geom_quat[obj_gid].copy())\n obj_world_pose = self.sim.data.get_joint_qpos('object:joint')\n obj_world_pos = obj_world_pose[:3]\n obj_world_quat = quaternion.as_quat_array(obj_world_pose[3:])\n zrot = quaternion.as_rotation_vector(obj_world_quat)[-1]\n pose = np.zeros((3,))\n pose[:2] = obj_world_pos[:2]\n pose[2] = zrot\n print(pose)\n return pose\n\n\n def _get_cam_frame(self, ground_truth=None):\n \"\"\"Grab an image from the camera (224, 244, 3) to feed into CNN\"\"\"\n #IMAGE_NOISE_RVARIANCE = Range(0.0, 0.0001)\n cam_img = self.sim.render(640, 480, camera_name='camera1')[::-1, :, :] # Rendered images are upside-down.\n # make camera crop be more like kinect\n #cam_img = self.sim.render(854, 480, camera_name='camera1')[::-1, 107:-107, :] # Rendered images are upside-down.\n\n #image_noise_variance = sample(IMAGE_NOISE_RVARIANCE)\n #cam_img = (skimage.util.random_noise(cam_img, mode='gaussian', var=image_noise_variance) * 255).astype(np.uint8)\n\n if self.display_data:\n print(ground_truth)\n #label = str(ground_truth[3:6])\n display_image(cam_img, mode='preproc')#, label)\n\n cam_img = preproc_image(cam_img)\n return cam_img\n\n def _randomize(self):\n self._rand_textures()\n self._rand_camera()\n self._rand_lights()\n #self._rand_robot()\n self._rand_object()\n self._rand_walls()\n self._rand_distract()\n\n def _rand_textures(self):\n \"\"\"Randomize all the textures in the scene, including the skybox\"\"\"\n bright = np.random.binomial(1, 0.5)\n for name in self.sim.model.geom_names + ('skybox',):\n self.tex_modder.rand_all(name)\n if bright:\n self.tex_modder.brighten(name, np.random.randint(0,150))\n\n def _rand_camera(self):\n \"\"\"Randomize pos, orientation, and fov of camera\n\n FOVY:\n Kinect2 is 53.8\n ASUS is 45\n https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE/specifications/\n http://smeenk.com/kinect-field-of-view-comparison/\n \"\"\"\n # Params\n # FOVY_R = Range(40, 50)\n X = Range(-2, -1)\n Y = Range(-0.1, 2)\n Z = Range(1.5, 2.1)\n C_R3D = Range3D(X, Y, Z)\n cam_pos = np.array(sample_xyz(C_R3D))\n #L_R3D = rto3d([-0.1, 0.1])\n\n # C_R3D = Range3D([-0.07,0.07], [-0.07,0.07], [-0.07,0.07])\n ANG3 = Range3D([-3,3], [-3,3], [-3,3])\n\n # Look approximately at the robot, but then randomize the orientation around that\n # cam_pos = np.array([-1.75, 0, 1.62])\n target_id = self.model.body_name2id(FLAGS.look_at)\n\n cam_off = 0 #sample_xyz(L_R3D)\n target_off = 0 #sample_xyz(L_R3D)\n quat = look_at(cam_pos+cam_off, self.sim.data.body_xpos[target_id]+target_off)\n quat = jitter_angle(quat, ANG3)\n #quat = jitter_quat(quat, 0.01)\n\n # cam_pos = np.array(sample_xyz(C_R3D))\n self.cam_modder.set_quat('camera1', quat)\n self.cam_modder.set_pos('camera1', cam_pos)\n self.cam_modder.set_fovy('camera1', 60)\n\n def _rand_lights(self):\n \"\"\"Randomize pos, direction, and lights\"\"\"\n # light stuff\n #X = Range(-1.5, 1.5)\n #Y = Range(-1.2, 1.2)\n #Z = Range(0, 2.8)\n X = Range(-1.5, -0.5)\n Y = Range(-0.6, 0.6)\n Z = Range(1.0, 1.5)\n LIGHT_R3D = Range3D(X, Y, Z)\n LIGHT_UNIF = Range3D(Range(0,1), Range(0,1), Range(0,1))\n\n # TODO: also try not altering the light dirs and just keeping them at like -1, or [0, -0.15, -1.0]\n for i, name in enumerate(self.model.light_names):\n lid = self.model.light_name2id(name)\n # random sample 80% of any given light being on\n if lid != 0:\n 
self.light_modder.set_active(name, sample([0,1]) < 0.8)\n self.light_modder.set_dir(name, sample_light_dir())\n\n self.light_modder.set_pos(name, sample_xyz(LIGHT_R3D))\n\n #self.light_modder.set_dir(name, sample_xyz(rto3d([-1,1])))\n\n #self.light_modder.set_specular(name, sample_xyz(LIGHT_UNIF))\n #self.light_modder.set_diffuse(name, sample_xyz(LIGHT_UNIF))\n #self.light_modder.set_ambient(name, sample_xyz(LIGHT_UNIF))\n\n spec = np.array([sample(Range(0.5,1))]*3)\n diffuse = np.array([sample(Range(0.5,1))]*3)\n ambient = np.array([sample(Range(0.5,1))]*3)\n\n self.light_modder.set_specular(name, spec)\n self.light_modder.set_diffuse(name, diffuse)\n self.light_modder.set_ambient(name, ambient)\n #self.model.light_directional[lid] = sample([0,1]) < 0.2\n self.model.light_castshadow[lid] = sample([0,1]) < 0.5\n\n def _rand_robot(self):\n \"\"\"Randomize joint angles and jitter orientation\"\"\"\n jnt_shape = self.sim.data.qpos.shape\n self.sim.data.qpos[:] = sample_joints(self.model.jnt_range, jnt_shape)\n\n robot_gid = self.model.geom_name2id('robot_table_link')\n self.model.geom_quat[robot_gid] = jitter_quat(self.START_GEOM_QUAT[robot_gid], 0.01)\n\n def _rand_object(self):\n obj_gid = self.sim.model.geom_name2id('object')\n obj_bid = self.sim.model.geom_name2id('object')\n table_gid = self.model.geom_name2id('object_table')\n table_bid = self.model.body_name2id('object_table')\n\n obj_pose = self.start_obj_pose.copy()\n xval = self.model.geom_size[table_gid][0] #- self.model.geom_size[obj_gid][0]\n yval = self.model.geom_size[table_gid][1] #- self.model.geom_size[obj_gid][1]\n\n O_X = Range(-xval, xval)\n O_Y = Range(-yval, yval)\n O_Z = Range(0, 0)\n O_R3D = Range3D(O_X, O_Y, O_Z)\n\n newpos = obj_pose[:3] + sample_xyz(O_R3D)\n newquat = jitter_quat(obj_pose[3:], 0.1)\n obj_pose[:3] = newpos\n obj_pose[3:] = newquat\n self.sim.data.set_joint_qpos('object:joint', obj_pose)\n #T_X = Range(-0.1, 0.1)\n #T_Y = Range(-0.1, 0.1)\n #T_Z = Range(-0.1, 0.1)\n #T_R3D = Range3D(T_X, T_Y, T_Z)\n #self.model.body_pos[table_bid] = self.START_BODY_POS[table_bid] + sample_xyz(T_R3D)\n ## randomize orientation a wee bit\n #self.model.geom_quat[table_gid] = jitter_quat(self.START_GEOM_QUAT[table_gid], 0.01)\n\n def _rand_walls(self):\n wall_bids = {name: self.model.body_name2id(name) for name in ['wall_'+dir for dir in 'nesw']}\n window_gid = self.model.geom_name2id('west_window')\n #floor_gid = self.model.geom_name2id('floor')\n\n WA_X = Range(-0.2, 0.2)\n WA_Y = Range(-0.2, 0.2)\n WA_Z = Range(-0.1, 0.1)\n WA_R3D = Range3D(WA_X, WA_Y, WA_Z)\n\n WI_X = Range(-0.1, 0.1)\n WI_Y = Range(0, 0)\n WI_Z = Range(-0.5, 0.5)\n WI_R3D = Range3D(WI_X, WI_Y, WI_Z)\n\n R = Range(0,0)\n P = Range(-10,10)\n Y = Range(0,0)\n RPY_R = Range3D(R,P,Y)\n\n #self.model.geom_quat[floor_gid] = jitter_quat(self.START_GEOM_QUAT[floor_gid], 0.01)\n #self.model.geom_pos[floor_gid] = self.START_GEOM_POS[floor_gid] + [0,0,sample(-0.1,0.1)\n\n self.model.geom_quat[window_gid] = sample_quat(RPY_R)\n #self.model.geom_quat[window_gid] = jitter_quat(self.START_GEOM_QUAT[window_gid], 0.01)\n self.model.geom_pos[window_gid] = self.START_GEOM_POS[window_gid] + sample_xyz(WI_R3D)\n\n for name in wall_bids:\n gid = wall_bids[name]\n self.model.body_quat[gid] = jitter_quat(self.START_BODY_QUAT[gid], 0.01)\n self.model.body_pos[gid] = self.START_BODY_POS[gid] + sample_xyz(WA_R3D)\n\n\n def _rand_distract(self):\n PREFIX = 'distract'\n geom_names = [name for name in self.model.geom_names if name.startswith(PREFIX)]\n\n # Size range\n SX = 
Range(0.01, 0.5)\n SY = Range(0.01, 0.9)\n SZ = Range(0.01, 0.5)\n S3D = Range3D(SX, SY, SZ)\n # Back range\n B_PX = Range(-0.5, 2)\n B_PY = Range(-1.5, 2)\n B_PZ = Range(0, 3)\n B_P3D = Range3D(B_PX, B_PY, B_PZ)\n # Front range\n F_PX = Range(-2, -0.5)\n F_PY = Range(-2, 1)\n F_PZ = Range(0, 0.5)\n F_P3D = Range3D(F_PX, F_PY, F_PZ)\n\n for name in geom_names:\n gid = self.model.geom_name2id(name)\n range = B_P3D if np.random.binomial(1, 0.5) else F_P3D\n\n self.model.geom_pos[gid] = sample_xyz(range)\n self.model.geom_quat[gid] = random_quat()\n self.model.geom_size[gid] = sample_xyz(S3D, mode='logspace')\n self.model.geom_type[gid] = sample_geom_type()\n self.model.geom_rgba[gid][-1] = np.random.binomial(1, 0.5)\n\n\n def _set_visible(self, prefix, range_top, visible):\n \"\"\"Helper function to set visibility of several objects\"\"\"\n if not visible:\n if range_top == 0:\n name = prefix\n gid = self.model.geom_name2id(name)\n self.model.geom_rgba[gid][-1] = 0.0\n\n for i in range(range_top):\n name = \"{}{}\".format(prefix, i)\n gid = self.model.geom_name2id(name)\n self.model.geom_rgba[gid][-1] = 0.0\n else:\n if range_top == 0:\n name = prefix\n gid = self.model.geom_name2id(name)\n self.model.geom_rgba[gid][-1] = 1.0\n\n for i in range(range_top):\n name = \"{}{}\".format(prefix, i)\n gid = self.model.geom_name2id(name)\n self.model.geom_rgba[gid][-1] = 1.0\n"
]
| [
[
"numpy.quaternion",
"numpy.random.binomial",
"numpy.random.randint",
"numpy.zeros"
]
]
|
DeepESP/gpt2-ml | [
"1ee721f50eca62302a4c19e3ff0f3343c45799cf"
]
| [
"train/dataloader.py"
]
| [
"# Original work Copyright 2018 The Google AI Language Team Authors.\n# Modified work Copyright 2019 Rowan Zellers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport random\nimport tensorflow as tf\n\n\ndef _decode_record(record, name_to_features, seq_length, sample_length=10240, is_training=True):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n if seq_length == sample_length:\n example[name] = t\n else:\n # On Training we randomize the sampling\n # On Eval we start always from 0\n if is_training:\n rand_positon = random.randint(0, sample_length - seq_length)\n else:\n rand_positon = 0\n example[name] = t[rand_positon:rand_positon + seq_length]\n\n return example\n\n\ndef input_fn_builder(input_files,\n seq_length,\n is_training,\n num_cpu_threads=4,\n evaluate_for_fixed_number_of_steps=True):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n name_to_features = {\n # \"input_ids\": tf.FixedLenFeature([seq_length + 1], tf.int64),\n \"input_ids\": tf.FixedLenFeature([10239 + 1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.data.experimental.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # If we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n if evaluate_for_fixed_number_of_steps:\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n\n # Eibriel: for some reason drop_remainder is always True, I think its ok\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: _decode_record(record, name_to_features, seq_length, is_training=is_training),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn\n\n\n# ~~~~~~~~~~~~~~ This is for classification / AF ~~~~~~~~~~~~~~~~~~\ndef classification_convert_examples_to_features(\n examples, max_seq_length, batch_size, encoder, output_file, labels, pad_extra_examples=False,\n chop_from_front_if_needed=True):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n label_map = {label: i for i, label in enumerate(labels)}\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n # begin_summary is our [CLS] token\n tokens = example['ids'] + [encoder.begin_summary]\n\n if len(tokens) > max_seq_length:\n if chop_from_front_if_needed:\n tokens = tokens[-max_seq_length:]\n else:\n tokens = example['ids'][:(max_seq_length - 1)] + [encoder.begin_summary]\n elif len(tokens) < max_seq_length:\n tokens.extend([encoder.padding] * (max_seq_length - len(tokens)))\n\n features = collections.OrderedDict()\n features['input_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=tokens))\n features['label_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[label_map[example['label']]]))\n features['is_real_example'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n\n if pad_extra_examples:\n for x in range(len(examples) % batch_size):\n features = collections.OrderedDict()\n features['input_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[0] * max_seq_length))\n features['label_ids'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[0]))\n features['is_real_example'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[0]))\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()\n\n\ndef classification_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder,\n buffer_size=100):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=buffer_size)\n\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n"
]
| [
[
"tensorflow.data.TFRecordDataset",
"tensorflow.train.Int64List",
"tensorflow.train.Features",
"tensorflow.FixedLenFeature",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.data.experimental.parallel_interleave",
"tensorflow.constant",
"tensorflow.parse_single_example",
"tensorflow.cast"
]
]
|
cmbaker00/optimal-test-allocation | [
"b8ad23a0bde7b360ed8e01af6beac5ba441d6322"
]
| [
"plotting_code.py"
]
| [
"from SimpleModelsModule import TestOptimisation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport param_values as scenario\n\ndef make_onward_transmission_vector(close_contact, symptomatic, asymptomatic):\n return tuple((close_contact, symptomatic, asymptomatic) for i in range(4))\n\n\ndef make_population_tuple(num_close, num_symp, total_pop,\n presenting_proporition, probability_by_indication):\n num_asymptomatic = total_pop - num_close - num_symp\n expected_cases = np.sum(np.array(probability_by_indication)*\n np.array([num_close, num_symp, num_asymptomatic]))\n expected_cases = np.round(expected_cases, 2)\n return (num_close, num_symp*presenting_proporition, num_asymptomatic), expected_cases\n\n\ndef run_analysis_save_plot(priority, onward_transmission, pop, pre_prob, cap, prop_symp, scenario_name, priority_ordering=None, directory_name=None):\n if directory_name is None:\n directory_name = 'Onward_transmission_and_postivity_basic_figures'\n test_optim = TestOptimisation(priority_queue=priority, onward_transmission=onward_transmission,\n population=pop,\n pre_test_probability=pre_prob,\n routine_capacity=cap,\n symptomatic_testing_proportion=prop_symp,\n test_prioritsation_by_indication=priority_ordering)\n max_prop_plot = 3/(cap/400)\n ax, onward, pos, exp_case, num_test\\\n = test_optim.make_plot_transmission_perc_post(max_test_proportion=max_prop_plot)\n\n rc = test_optim.routine_capacity / 100\n ax.plot([rc, rc], [50, 100], '--r')\n ax.text(rc * 1.04, 85, 'Routine capacity', rotation=270)\n priority_string = '' if priority else '_no_priority'\n priority_order_string = '' if priority_ordering == None else '_symptomatic_priority'\n plt.savefig(f'{directory_name}/{scenario_name}_test_prop_{prop_symp}_cap_{int(cap/100)}'\n f'{priority_string}{priority_order_string}.png')\n plt.close()\n def fill_box(xmin, xmax, col=(0, 0, 0), ymin=50., ymax=100.):\n plt.fill([xmin, xmax, xmax, xmin, xmin],\n [ymin, ymin, ymax, ymax, ymin],\n alpha=.3,\n color=col,\n lw=0)\n if priority_ordering:\n if priority_ordering == (2, 1, 3):\n effective_pop = [prop_symp*i/100 for i in pop]\n effective_pop_order = [effective_pop[i - 1] for i in priority_ordering]\n test_bounds = np.cumsum([0] + effective_pop_order)\n test_bounds[-1] = 12\n col_array = [[0.2]*3, [.4]*3, [.6]*3]\n for i, col in zip(range(3), col_array):\n fill_box(test_bounds[i], test_bounds[i+1],\n col=col)\n plt.plot(num_test, onward)\n\n rc = test_optim.routine_capacity / 100\n plt.plot([rc, rc], [50, 100], '--r')\n plt.text(rc * 1.04, 55, 'Routine capacity', rotation=270)\n\n plt.xlabel('Tests per 1000 people')\n plt.ylabel('Percentage of onwards transmission')\n plt.savefig(f'{directory_name}/{scenario_name}_test_prop_{prop_symp}_cap_{int(cap / 100)}'\n f'{priority_string}{priority_order_string}_onward_only.png')\n plt.show()\n plt.close()\n\n plt.figure()\n pos = pos*100\n fig_top = max(pos)*1.1\n for i, col in zip(range(3), col_array):\n fill_box(test_bounds[i], test_bounds[i+1],\n ymin=0, ymax=fig_top,\n col=col)\n plt.plot(num_test, pos)\n\n rc = test_optim.routine_capacity / 100\n plt.plot([rc, rc], [0, fig_top], '--r')\n plt.text(rc * 1.04, .05*fig_top, 'Routine capacity', rotation=270)\n\n plt.xlabel('Tests per 1000 people')\n plt.ylabel('Percentage of positive tests')\n plt.savefig(f'Onward_transmission_and_postivity_basic_figures/{scenario_name}_test_prop_{prop_symp}_cap_{int(cap / 100)}'\n f'{priority_string}{priority_order_string}_positivity_only.png')\n\n plt.show()\n plt.close()\n else:\n raise 
ValueError(f'priority_ordering {priority_ordering} is unkown')\n\nif __name__ == '__main__':\n\n total_population = scenario.total_population\n\n # High prevelance\n onward_transmission_vector_high = \\\n make_onward_transmission_vector(*scenario.onward_transmission_high)\n\n test_prob_high = scenario.test_prob_high\n\n population_high, cases_high = \\\n make_population_tuple(num_close=scenario.pop_high[0],\n num_symp=scenario.pop_high[1],\n total_pop=total_population,\n presenting_proporition=1,\n probability_by_indication=test_prob_high)\n\n print(f'Daily infections = {cases_high}')\n\n # Low prevelance\n onward_transmission_vector_low = \\\n make_onward_transmission_vector(*scenario.onward_transmission_low)\n\n test_prob_low = scenario.test_prob_low\n\n population_low, cases_low = \\\n make_population_tuple(num_close=scenario.pop_low[0],\n num_symp=scenario.pop_low[1],\n total_pop=total_population,\n presenting_proporition=1,\n probability_by_indication=test_prob_low)\n\n print(f'Daily infections = {cases_low}')\n\n priority_values = [True, False]\n capacity_values = [scenario.test_capacity_low, scenario.test_capacity_high]\n symp_prop_values = [.2, .4, .5, .6, .8, 1]\n scenario_names = ['Low_prev', 'High_prev']\n situation_dict = {'Low_prev': {'onward': onward_transmission_vector_low,\n 'pop': population_low,\n 'pre_prob': test_prob_low},\n 'High_prev': {'onward': onward_transmission_vector_high,\n 'pop': population_high,\n 'pre_prob': test_prob_high}\n }\n priority_allocation_options = scenario.priority_order\n\n for priority_value in priority_values:\n for priority_order in priority_allocation_options:\n for capacity_value in capacity_values:\n for symp_prop_value in symp_prop_values:\n for scenario in scenario_names:\n c_dict = situation_dict[scenario]\n run_analysis_save_plot(priority=priority_value,\n onward_transmission=c_dict['onward'],\n pop=c_dict['pop'],\n pre_prob=c_dict['pre_prob'],\n cap=capacity_value,\n prop_symp=symp_prop_value,\n scenario_name=scenario,\n priority_ordering=priority_order)\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.text",
"numpy.round",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.cumsum",
"matplotlib.pyplot.show"
]
]
|
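The first entry above sweeps a test-allocation model (TestOptimisation) over priority, capacity, symptomatic-proportion and prevalence scenarios, and computes expected daily cases as a population-weighted sum of per-indication infection probabilities. Below is a minimal standalone sketch of that calculation and of a flat version of the scenario sweep; the population counts and probabilities are placeholders, not values from the repo's param_values module.

# Minimal sketch of the expected-case calculation and scenario sweep in the
# test-allocation script above. Population counts and probabilities here are
# placeholders, not values from the repo's param_values module.
from itertools import product
import numpy as np

def expected_cases(pop_by_indication, prob_by_indication):
    # expected daily infections = sum over indications of count * P(infected | indication)
    return round(float(np.dot(pop_by_indication, prob_by_indication)), 2)

pop = (20, 100, 880)        # close contacts, symptomatic, asymptomatic (per 1000 people)
prob = (0.05, 0.02, 0.001)  # hypothetical infection probability by indication
print(expected_cases(pop, prob))  # -> 3.88

# A flat sweep over the same scenario axes the script iterates with nested loops;
# run_analysis_save_plot(...) from the script would be called in the body.
for priority, cap, prop_symp, name in product([True, False], [100, 400],
                                              [0.5, 1.0], ['Low_prev', 'High_prev']):
    pass

Using itertools.product keeps the sweep flat and avoids reusing a loop variable name for an imported module, which the original script does in its innermost loop.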
x-zho14/Unified-LTH-GNN | [
"edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55"
]
| [
"LinkPrediction/main_gingat_rp.py"
]
| [
"\"Implementation based on https://github.com/PetarV-/DGI\"\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nfrom models import LogReg, GIC_GCN, GIC_GAT, GIC_GIN\nfrom utils import process\nimport argparse\nimport pdb\nimport pruning\nimport pruning_gin\nimport pruning_gat\nimport copy\nimport dgl\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef run_fix_mask(args, imp_num, adj_percent, wei_percent, dataset_dict):\n\n pruning_gin.setup_seed(args.seed)\n num_clusters = int(args.num_clusters)\n dataset = args.dataset\n\n batch_size = 1\n patience = 50\n l2_coef = 0.0\n hid_units = 16\n # sparse = True\n sparse = False\n nonlinearity = 'prelu' # special name to separate parameters\n\n adj = dataset_dict['adj']\n adj_sparse = dataset_dict['adj_sparse']\n features = dataset_dict['features']\n labels = dataset_dict['labels']\n val_edges = dataset_dict['val_edges']\n val_edges_false = dataset_dict['val_edges_false']\n test_edges = dataset_dict['test_edges']\n test_edges_false = dataset_dict['test_edges_false']\n\n nb_nodes = features.shape[1]\n ft_size = features.shape[2]\n\n g = dgl.DGLGraph()\n g.add_nodes(nb_nodes)\n adj = adj.tocoo()\n g.add_edges(adj.row, adj.col)\n\n \n b_xent = nn.BCEWithLogitsLoss()\n b_bce = nn.BCELoss()\n\n if args.net == 'gin':\n model = GIC_GIN(nb_nodes, ft_size, hid_units, nonlinearity, num_clusters, 100, g)\n pruning_gin.add_mask(model.gcn)\n pruning_gin.random_pruning(model.gcn, adj_percent, wei_percent)\n adj_spar, wei_spar = pruning_gin.print_sparsity(model.gcn)\n elif args.net == 'gat':\n model = GIC_GAT(nb_nodes, ft_size, hid_units, nonlinearity, num_clusters, 100, g)\n g.add_edges(list(range(nb_nodes)), list(range(nb_nodes)))\n pruning_gat.add_mask(model.gcn)\n pruning_gat.random_pruning(model.gcn, adj_percent, wei_percent)\n adj_spar, wei_spar = pruning_gat.print_sparsity(model.gcn)\n else: assert False\n\n for name, param in model.named_parameters():\n if 'mask' in name:\n param.requires_grad = False\n #print(\"NAME:{}\\tSHAPE:{}\\tGRAD:{}\".format(name, param.shape, param.requires_grad))\n\n optimiser = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=l2_coef)\n model.cuda()\n best_val_acc = {'val_acc': 0, 'epoch' : 0, 'test_acc':0}\n for epoch in range(1, args.fix_epoch + 1):\n model.train()\n optimiser.zero_grad()\n\n idx = np.random.permutation(nb_nodes)\n shuf_fts = features[:, idx, :]\n lbl_1 = torch.ones(batch_size, nb_nodes)\n lbl_2 = torch.zeros(batch_size, nb_nodes)\n lbl = torch.cat((lbl_1, lbl_2), 1)\n shuf_fts = shuf_fts.cuda()\n lbl = lbl.cuda()\n\n logits, logits2 = model(features, shuf_fts, \n g, \n sparse, None, None, None, 100) \n loss = 0.5 * b_xent(logits, lbl) + 0.5 * b_xent(logits2, lbl) \n loss.backward()\n optimiser.step()\n\n with torch.no_grad():\n acc_val, _ = pruning.test(model, features, \n g, \n sparse, \n adj_sparse, \n val_edges, \n val_edges_false)\n acc_test, _ = pruning.test(model, features, \n g, \n sparse, \n adj_sparse, \n test_edges, \n test_edges_false)\n if acc_val > best_val_acc['val_acc']:\n best_val_acc['test_acc'] = acc_test\n best_val_acc['val_acc'] = acc_val\n best_val_acc['epoch'] = epoch\n \n print(\"RP [{}] ({} {} FIX Mask) Epoch:[{}/{}], Loss:[{:.4f}] Val:[{:.2f}] Test:[{:.2f}] | Best Val:[{:.2f}] Test:[{:.2f}] at Epoch:[{}] | Adj:[{:.2f}%] Wei:[{:.2f}%]\"\n .format(imp_num,\n args.net,\n args.dataset,\n epoch, \n args.fix_epoch, \n loss, \n acc_val * 100, \n acc_test * 100, \n best_val_acc['val_acc'] * 100,\n best_val_acc['test_acc'] * 100,\n 
best_val_acc['epoch'],\n adj_spar,\n wei_spar))\n\n print(\"syd final: RP[{}] ({} {} FIX Mask) | Best Val:[{:.2f}] Test:[{:.2f}] at Epoch:[{}] | Adj:[{:.2f}%] Wei:[{:.2f}%]\"\n .format(imp_num,\n args.net,\n args.dataset, \n best_val_acc['val_acc'] * 100,\n best_val_acc['test_acc'] * 100,\n best_val_acc['epoch'],\n adj_spar,\n wei_spar))\n\ndef parser_loader():\n\n parser = argparse.ArgumentParser(description='Options')\n parser.add_argument('--s1', type=float, default=0.0001,help='scale sparse rate (default: 0.0001)')\n parser.add_argument('--s2', type=float, default=0.0001,help='scale sparse rate (default: 0.0001)')\n parser.add_argument('--mask_epoch', type=int, default=300)\n parser.add_argument('--fix_epoch', type=int, default=300)\n parser.add_argument('--pruning_percent_wei', type=float, default=0.2)\n parser.add_argument('--pruning_percent_adj', type=float, default=0.05)\n parser.add_argument('--net', type=str, default='',help='')\n\n parser.add_argument('--epochs', type=int, default=2000, help='')\n parser.add_argument('--lr', type=float, default=0.001, help='')\n parser.add_argument('--seed', type=int, default=1234, help='')\n parser.add_argument('--dataset', type=str, default='cora',help='')\n parser.add_argument('--b', dest='beta', type=int, default=100,help='')\n parser.add_argument('--c', dest='num_clusters', type=float, default=128,help='')\n parser.add_argument('--a', dest='alpha', type=float, default=0.5,help='')\n parser.add_argument('--test_rate', dest='test_rate', type=float, default=0.1,help='')\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n\n args = parser_loader()\n pruning_gin.print_args(args)\n\n dataset = args.dataset\n adj, features, labels, idx_train, idx_val, idx_test = process.load_data(dataset)\n adj_sparse = adj\n adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \\\n test_edges, test_edges_false = process.mask_test_edges(adj, test_frac=args.test_rate, val_frac=0.05)\n adj = adj_train\n features, _ = process.preprocess_features(features)\n features = torch.FloatTensor(features[np.newaxis]).cuda()\n # adj = torch.FloatTensor(adj.todense()).cuda()\n labels = torch.FloatTensor(labels[np.newaxis]).cuda()\n\n dataset_dict = {}\n dataset_dict['adj'] = adj\n dataset_dict['adj_sparse'] = adj_sparse\n dataset_dict['features'] = features\n dataset_dict['labels'] = labels\n dataset_dict['val_edges'] = val_edges\n dataset_dict['val_edges_false'] = val_edges_false\n dataset_dict['test_edges'] = test_edges\n dataset_dict['test_edges_false'] = test_edges_false\n\n percent_list = [(1 - (1 - args.pruning_percent_adj) ** (i + 1), 1 - (1 - args.pruning_percent_wei) ** (i + 1)) for i in range(20)]\n\n for imp_num, (adj_percent, wei_percent) in enumerate(percent_list):\n run_fix_mask(args, imp_num + 1, adj_percent, wei_percent, dataset_dict)\n "
]
| [
[
"torch.zeros",
"torch.cat",
"numpy.random.permutation",
"torch.no_grad",
"torch.FloatTensor",
"torch.ones",
"torch.nn.BCELoss",
"torch.nn.BCEWithLogitsLoss"
]
]
|
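The percent_list built at the bottom of the entry above encodes the usual iterative-pruning schedule: each round removes a fixed fraction of whatever edges and weights remain, so the cumulative pruning ratio after k rounds is 1 - (1 - p)^k. A small standalone sketch of that schedule, with defaults matching the script's argparse defaults (5% adjacency, 20% weights per round):

# Standalone sketch of the cumulative sparsity schedule built as percent_list above:
# pruning a fixed fraction p of the *remaining* edges/weights each round leaves
# (1 - p)**k of them after k rounds, i.e. a cumulative pruning ratio of 1 - (1 - p)**k.
def sparsity_schedule(p_adj=0.05, p_wei=0.2, rounds=20):
    # defaults mirror the script's argparse defaults (5% adjacency, 20% weights per round)
    return [(1 - (1 - p_adj) ** (i + 1), 1 - (1 - p_wei) ** (i + 1))
            for i in range(rounds)]

for k, (adj, wei) in enumerate(sparsity_schedule()[:3], start=1):
    print(f"round {k}: adj pruned {adj:.1%}, weights pruned {wei:.1%}")
# roughly 5%/20% after round 1, 10%/36% after round 2, 14%/49% after round 3

Each tuple in the schedule is passed to run_fix_mask in the script, which applies random pruning at that cumulative ratio and retrains with the mask fixed.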
Kokookster/NTPoly | [
"a08ed06b39eca9100364d4a7fa59292d8d2f80dd"
]
| [
"Examples/OverlapMatrix/visualize.py"
]
| [
"\"\"\"\nVisualize two matrices side by side.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom scipy.io import mmread\n\n###############################################################################\nif __name__ == \"__main__\":\n input_mat = mmread(\"input.mtx\")\n output_mat = mmread(\"output.mtx\")\n plt.subplot(211)\n plt.imshow(input_mat.todense())\n plt.subplot(212)\n plt.imshow(output_mat.todense())\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.show",
"scipy.io.mmread",
"matplotlib.pyplot.subplot"
]
]
|
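The NTPoly example above calls todense() on both Matrix Market files before imshow, which is fine for small overlap matrices. A hedged variation for larger inputs is to plot only the sparsity pattern with spy(), which never materialises the dense array; the sketch below assumes the same input.mtx and output.mtx files as the script above.

# Variation on the NTPoly visualization script above: for larger matrices,
# plotting the sparsity pattern with spy() avoids calling todense() on the
# whole matrix. File names follow the script above ("input.mtx", "output.mtx").
import matplotlib.pyplot as plt
from scipy.io import mmread

if __name__ == "__main__":
    input_mat = mmread("input.mtx")    # scipy.sparse matrix for coordinate-format files
    output_mat = mmread("output.mtx")
    fig, (ax_in, ax_out) = plt.subplots(1, 2, figsize=(10, 5))
    ax_in.spy(input_mat, markersize=1)
    ax_in.set_title("input.mtx")
    ax_out.spy(output_mat, markersize=1)
    ax_out.set_title("output.mtx")
    plt.show()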
vfilimonov/pandas | [
"b3d1a059fd86577b46f080c3bd3661597530587f"
]
| [
"pandas/tests/test_groupby.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport nose\n\nfrom numpy.testing.decorators import slow\n\nfrom datetime import datetime\nfrom numpy import nan\n\nfrom pandas import date_range,bdate_range, Timestamp\nfrom pandas.core.index import Index, MultiIndex, Int64Index\nfrom pandas.core.api import Categorical, DataFrame\nfrom pandas.core.groupby import (SpecificationError, DataError,\n _nargsort, _lexsort_indexer)\nfrom pandas.core.series import Series\nfrom pandas.core.config import option_context\nfrom pandas.util.testing import (assert_panel_equal, assert_frame_equal,\n assert_series_equal, assert_almost_equal,\n assert_index_equal, assertRaisesRegexp)\nfrom pandas.compat import(\n range, long, lrange, StringIO, lmap, lzip, map,\n zip, builtins, OrderedDict, product as cart_product\n)\nfrom pandas import compat\nfrom pandas.core.panel import Panel\nfrom pandas.tools.merge import concat\nfrom collections import defaultdict\nfrom functools import partial\nimport pandas.core.common as com\nimport numpy as np\n\nimport pandas.core.nanops as nanops\n\nimport pandas.util.testing as tm\nimport pandas as pd\nfrom numpy.testing import assert_equal\n\ndef _skip_if_mpl_not_installed():\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise nose.SkipTest(\"matplotlib not installed\")\n\ndef commonSetUp(self):\n self.dateRange = bdate_range('1/1/2005', periods=250)\n self.stringIndex = Index([rands(8).upper() for x in range(250)])\n\n self.groupId = Series([x[0] for x in self.stringIndex],\n index=self.stringIndex)\n self.groupDict = dict((k, v) for k, v in compat.iteritems(self.groupId))\n\n self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])\n\n randMat = np.random.randn(250, 5)\n self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,\n index=self.stringIndex)\n\n self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,\n index=self.dateRange)\n\n\nclass TestGroupBy(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.ts = tm.makeTimeSeries()\n\n self.seriesd = tm.getSeriesData()\n self.tsd = tm.getTimeSeriesData()\n self.frame = DataFrame(self.seriesd)\n self.tsframe = DataFrame(self.tsd)\n\n self.df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n self.df_mixed_floats = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.array(np.random.randn(8),\n dtype='float32')})\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n self.mframe = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n self.three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n def test_basic(self):\n\n def checkit(dtype):\n data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = 
data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n for k, v in grouped:\n self.assertEqual(len(v), 3)\n\n agged = grouped.aggregate(np.mean)\n self.assertEqual(agged[1], 1)\n\n assert_series_equal(agged, grouped.agg(np.mean)) # shorthand\n assert_series_equal(agged, grouped.mean())\n assert_series_equal(grouped.agg(np.sum), grouped.sum())\n\n expected = grouped.apply(lambda x: x * x.sum())\n transformed = grouped.transform(lambda x: x * x.sum())\n self.assertEqual(transformed[7], 12)\n assert_series_equal(transformed, expected)\n\n value_grouped = data.groupby(data)\n assert_series_equal(value_grouped.aggregate(np.mean), agged)\n\n # complex agg\n agged = grouped.aggregate([np.mean, np.std])\n agged = grouped.aggregate({'one': np.mean,\n 'two': np.std})\n\n group_constants = {\n 0: 10,\n 1: 20,\n 2: 30\n }\n agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())\n self.assertEqual(agged[1], 21)\n\n # corner cases\n self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)\n\n for dtype in ['int64', 'int32', 'float64', 'float32']:\n checkit(dtype)\n\n def test_select_bad_cols(self):\n df = DataFrame([[1, 2]], columns=['A', 'B'])\n g = df.groupby('A')\n self.assertRaises(KeyError, g.__getitem__, ['C']) # g[['C']]\n\n self.assertRaises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]\n with assertRaisesRegexp(KeyError, '^[^A]+$'):\n # A should not be referenced as a bad column...\n # will have to rethink regex if you change message!\n g[['A', 'C']]\n\n def test_first_last_nth(self):\n # tests for first / last / nth\n grouped = self.df.groupby('A')\n first = grouped.first()\n expected = self.df.ix[[1, 0], ['B','C','D']]\n expected.index = Index(['bar', 'foo'],name='A')\n expected = expected.sort_index()\n assert_frame_equal(first, expected)\n\n nth = grouped.nth(0)\n assert_frame_equal(nth, expected)\n\n last = grouped.last()\n expected = self.df.ix[[5, 7], ['B','C','D']]\n expected.index = Index(['bar', 'foo'],name='A')\n assert_frame_equal(last, expected)\n\n nth = grouped.nth(-1)\n assert_frame_equal(nth, expected)\n\n nth = grouped.nth(1)\n expected = self.df.ix[[2, 3],['B','C','D']].copy()\n expected.index = Index(['foo', 'bar'],name='A')\n expected = expected.sort_index()\n assert_frame_equal(nth, expected)\n\n # it works!\n grouped['B'].first()\n grouped['B'].last()\n grouped['B'].nth(0)\n\n self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan\n self.assertTrue(com.isnull(grouped['B'].first()['foo']))\n self.assertTrue(com.isnull(grouped['B'].last()['foo']))\n self.assertTrue(com.isnull(grouped['B'].nth(0)[0])) # not sure what this is testing\n\n # v0.14.0 whatsnew\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n g = df.groupby('A')\n result = g.first()\n expected = df.iloc[[1,2]].set_index('A')\n assert_frame_equal(result, expected)\n\n expected = df.iloc[[1,2]].set_index('A')\n result = g.nth(0,dropna='any')\n assert_frame_equal(result, expected)\n\n def test_first_last_nth_dtypes(self):\n\n df = self.df_mixed_floats.copy()\n df['E'] = True\n df['F'] = 1\n\n # tests for first / last / nth\n grouped = df.groupby('A')\n first = grouped.first()\n expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(first, expected)\n\n last = grouped.last()\n expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(last, expected)\n\n nth = 
grouped.nth(1)\n expected = df.ix[[3, 2],['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(nth, expected)\n\n # GH 2763, first/last shifting dtypes\n idx = lrange(10)\n idx.append(9)\n s = Series(data=lrange(11), index=idx, name='IntCol')\n self.assertEqual(s.dtype, 'int64')\n f = s.groupby(level=0).first()\n self.assertEqual(f.dtype, 'int64')\n\n def test_nth(self):\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n g = df.groupby('A')\n\n assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))\n assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))\n assert_frame_equal(g.nth(2), df.loc[[],['B']])\n assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))\n assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))\n assert_frame_equal(g.nth(-3), df.loc[[],['B']])\n assert_series_equal(g.B.nth(0), df.B.iloc[[0, 2]])\n assert_series_equal(g.B.nth(1), df.B.iloc[[1]])\n assert_frame_equal(g[['B']].nth(0), df.ix[[0, 2], ['A', 'B']].set_index('A'))\n\n exp = df.set_index('A')\n assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])\n assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])\n\n exp['B'] = np.nan\n assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])\n assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])\n\n # out of bounds, regression from 0.13.1\n # GH 6621\n df = DataFrame({'color': {0: 'green', 1: 'green', 2: 'red', 3: 'red', 4: 'red'},\n 'food': {0: 'ham', 1: 'eggs', 2: 'eggs', 3: 'ham', 4: 'pork'},\n 'two': {0: 1.5456590000000001, 1: -0.070345000000000005, 2: -2.4004539999999999, 3: 0.46206000000000003, 4: 0.52350799999999997},\n 'one': {0: 0.56573799999999996, 1: -0.9742360000000001, 2: 1.033801, 3: -0.78543499999999999, 4: 0.70422799999999997}}).set_index(['color', 'food'])\n\n result = df.groupby(level=0).nth(2)\n expected = df.iloc[[-1]]\n assert_frame_equal(result,expected)\n\n result = df.groupby(level=0).nth(3)\n expected = df.loc[[]]\n assert_frame_equal(result,expected)\n\n # GH 7559\n # from the vbench\n df = DataFrame(np.random.randint(1, 10, (100, 2)),dtype='int64')\n s = df[1]\n g = df[0]\n expected = s.groupby(g).first()\n expected2 = s.groupby(g).apply(lambda x: x.iloc[0])\n assert_series_equal(expected2,expected)\n\n # validate first\n v = s[g==1].iloc[0]\n self.assertEqual(expected.iloc[0],v)\n self.assertEqual(expected2.iloc[0],v)\n\n # this is NOT the same as .first (as sorted is default!)\n # as it keeps the order in the series (and not the group order)\n # related GH 7287\n expected = s.groupby(g,sort=False).first()\n expected.index = range(1,10)\n result = s.groupby(g).nth(0,dropna='all')\n assert_series_equal(result,expected)\n\n # doc example\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n g = df.groupby('A')\n result = g.B.nth(0, dropna=True)\n expected = g.B.first()\n assert_series_equal(result,expected)\n\n # test multiple nth values\n df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],\n columns=['A', 'B'])\n g = df.groupby('A')\n\n assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))\n assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))\n assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))\n assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))\n assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))\n assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))\n 
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))\n assert_frame_equal(g.nth([3, 4]), df.loc[[],['B']])\n\n business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')\n df = DataFrame(1, index=business_dates, columns=['a', 'b'])\n # get the first, fourth and last two business days for each month\n result = df.groupby((df.index.year, df.index.month)).nth([0, 3, -2, -1])\n expected_dates = pd.to_datetime(['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30',\n '2014/5/1', '2014/5/6', '2014/5/29', '2014/5/30',\n '2014/6/2', '2014/6/5', '2014/6/27', '2014/6/30'])\n expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)\n assert_frame_equal(result, expected)\n\n def test_nth_multi_index(self):\n # PR 9090, related to issue 8979\n # test nth on MultiIndex, should match .first()\n grouped = self.three_group.groupby(['A', 'B'])\n result = grouped.nth(0)\n expected = grouped.first()\n assert_frame_equal(result, expected)\n\n\n def test_nth_multi_index_as_expected(self):\n # PR 9090, related to issue 8979\n # test nth on MultiIndex\n three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny']})\n grouped = three_group.groupby(['A', 'B'])\n result = grouped.nth(0)\n expected = DataFrame({'C': ['dull', 'dull', 'dull', 'dull']},\n index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'], ['one', 'two', 'one', 'two']],\n names=['A', 'B']))\n assert_frame_equal(result, expected)\n\n\n def test_grouper_index_types(self):\n # related GH5375\n # groupby misbehaving when using a Floatlike index\n df = DataFrame(np.arange(10).reshape(5,2),columns=list('AB'))\n for index in [ tm.makeFloatIndex, tm.makeStringIndex,\n tm.makeUnicodeIndex, tm.makeIntIndex,\n tm.makeDateIndex, tm.makePeriodIndex ]:\n\n df.index = index(len(df))\n df.groupby(list('abcde')).apply(lambda x: x)\n\n df.index = list(reversed(df.index.tolist()))\n df.groupby(list('abcde')).apply(lambda x: x)\n\n def test_grouper_multilevel_freq(self):\n\n # GH 7885\n # with level and freq specified in a pd.Grouper\n from datetime import date, timedelta\n d0 = date.today() - timedelta(days=14)\n dates = date_range(d0, date.today())\n date_index = pd.MultiIndex.from_product([dates, dates], names=['foo', 'bar'])\n df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)\n\n # Check string level\n expected = df.reset_index().groupby([pd.Grouper(key='foo', freq='W'),\n pd.Grouper(key='bar', freq='W')]).sum()\n result = df.groupby([pd.Grouper(level='foo', freq='W'),\n pd.Grouper(level='bar', freq='W')]).sum()\n assert_frame_equal(result, expected)\n\n # Check integer level\n result = df.groupby([pd.Grouper(level=0, freq='W'),\n pd.Grouper(level=1, freq='W')]).sum()\n assert_frame_equal(result, expected)\n\n def test_grouper_creation_bug(self):\n\n # GH 8795\n df = DataFrame({'A':[0,0,1,1,2,2], 'B':[1,2,3,4,5,6]})\n g = df.groupby('A')\n expected = g.sum()\n\n g = df.groupby(pd.Grouper(key='A'))\n result = g.sum()\n assert_frame_equal(result, expected)\n\n result = g.apply(lambda x: x.sum())\n assert_frame_equal(result, expected)\n\n g = df.groupby(pd.Grouper(key='A',axis=0))\n result = g.sum()\n assert_frame_equal(result, expected)\n\n # GH8866\n s = Series(np.arange(8,dtype='int64'),\n index=pd.MultiIndex.from_product([list('ab'),\n range(2),\n 
date_range('20130101',periods=2)],\n names=['one','two','three']))\n result = s.groupby(pd.Grouper(level='three',freq='M')).sum()\n expected = Series([28],index=Index([Timestamp('2013-01-31')],freq='M',name='three'))\n assert_series_equal(result, expected)\n\n # just specifying a level breaks\n result = s.groupby(pd.Grouper(level='one')).sum()\n expected = s.groupby(level='one').sum()\n assert_series_equal(result, expected)\n\n def test_grouper_iter(self):\n self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])\n\n def test_empty_groups(self):\n # GH # 1048\n self.assertRaises(ValueError, self.df.groupby, [])\n\n def test_groupby_grouper(self):\n grouped = self.df.groupby('A')\n\n result = self.df.groupby(grouped.grouper).mean()\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n def test_groupby_duplicated_column_errormsg(self):\n # GH7511\n df = DataFrame(columns=['A','B','A','C'], \\\n data=[range(4), range(2,6), range(0, 8, 2)])\n\n self.assertRaises(ValueError, df.groupby, 'A')\n self.assertRaises(ValueError, df.groupby, ['A', 'B'])\n\n grouped = df.groupby('B')\n c = grouped.count()\n self.assertTrue(c.columns.nlevels == 1)\n self.assertTrue(c.columns.size == 3)\n\n def test_groupby_dict_mapping(self):\n # GH #679\n from pandas import Series\n s = Series({'T1': 5})\n result = s.groupby({'T1': 'T2'}).agg(sum)\n expected = s.groupby(['T2']).agg(sum)\n assert_series_equal(result, expected)\n\n s = Series([1., 2., 3., 4.], index=list('abcd'))\n mapping = {'a': 0, 'b': 0, 'c': 1, 'd': 1}\n\n result = s.groupby(mapping).mean()\n result2 = s.groupby(mapping).agg(np.mean)\n expected = s.groupby([0, 0, 1, 1]).mean()\n expected2 = s.groupby([0, 0, 1, 1]).mean()\n assert_series_equal(result, expected)\n assert_series_equal(result, result2)\n assert_series_equal(result, expected2)\n\n def test_groupby_bounds_check(self):\n import pandas as pd\n # groupby_X is code-generated, so if one variant\n # does, the rest probably do to\n a = np.array([1,2],dtype='object')\n b = np.array([1,2,3],dtype='object')\n self.assertRaises(AssertionError, pd.algos.groupby_object,a, b)\n\n def test_groupby_grouper_f_sanity_checked(self):\n import pandas as pd\n dates = date_range('01-Jan-2013', periods=12, freq='MS')\n ts = pd.TimeSeries(np.random.randn(12), index=dates)\n\n # GH3035\n # index.map is used to apply grouper to the index\n # if it fails on the elements, map tries it on the entire index as\n # a sequence. 
That can yield invalid results that cause trouble\n # down the line.\n # the surprise comes from using key[0:6] rather then str(key)[0:6]\n # when the elements are Timestamp.\n # the result is Index[0:6], very confusing.\n\n self.assertRaises(AssertionError, ts.groupby,lambda key: key[0:6])\n\n def test_groupby_nonobject_dtype(self):\n key = self.mframe.index.labels[0]\n grouped = self.mframe.groupby(key)\n result = grouped.sum()\n\n expected = self.mframe.groupby(key.astype('O')).sum()\n assert_frame_equal(result, expected)\n\n # GH 3911, mixed frame non-conversion\n df = self.df_mixed_floats.copy()\n df['value'] = lrange(len(df))\n\n def max_value(group):\n return group.ix[group['value'].idxmax()]\n\n applied = df.groupby('A').apply(max_value)\n result = applied.get_dtype_counts()\n result.sort()\n expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 })\n expected.sort()\n assert_series_equal(result,expected)\n\n def test_groupby_return_type(self):\n\n # GH2893, return a reduced type\n df1 = DataFrame([{\"val1\": 1, \"val2\" : 20}, {\"val1\":1, \"val2\": 19},\n {\"val1\":2, \"val2\": 27}, {\"val1\":2, \"val2\": 12}])\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n result = df1.groupby(\"val1\", squeeze=True).apply(func)\n tm.assert_isinstance(result,Series)\n\n df2 = DataFrame([{\"val1\": 1, \"val2\" : 20}, {\"val1\":1, \"val2\": 19},\n {\"val1\":1, \"val2\": 27}, {\"val1\":1, \"val2\": 12}])\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n result = df2.groupby(\"val1\", squeeze=True).apply(func)\n tm.assert_isinstance(result,Series)\n\n # GH3596, return a consistent type (regression in 0.11 from 0.10.1)\n df = DataFrame([[1,1],[1,1]],columns=['X','Y'])\n result = df.groupby('X',squeeze=False).count()\n tm.assert_isinstance(result,DataFrame)\n\n # GH5592\n # inconcistent return type\n df = DataFrame(dict(A = [ 'Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb', 'Pony', 'Pony' ],\n B = Series(np.arange(7),dtype='int64'),\n C = date_range('20130101',periods=7)))\n\n def f(grp):\n return grp.iloc[0]\n expected = df.groupby('A').first()[['B']]\n result = df.groupby('A').apply(f)[['B']]\n assert_frame_equal(result,expected)\n\n def f(grp):\n if grp.name == 'Tiger':\n return None\n return grp.iloc[0]\n result = df.groupby('A').apply(f)[['B']]\n e = expected.copy()\n e.loc['Tiger'] = np.nan\n assert_frame_equal(result,e)\n\n def f(grp):\n if grp.name == 'Pony':\n return None\n return grp.iloc[0]\n result = df.groupby('A').apply(f)[['B']]\n e = expected.copy()\n e.loc['Pony'] = np.nan\n assert_frame_equal(result,e)\n\n # 5592 revisited, with datetimes\n def f(grp):\n if grp.name == 'Pony':\n return None\n return grp.iloc[0]\n result = df.groupby('A').apply(f)[['C']]\n e = df.groupby('A').first()[['C']]\n e.loc['Pony'] = np.nan\n assert_frame_equal(result,e)\n\n # scalar outputs\n def f(grp):\n if grp.name == 'Pony':\n return None\n return grp.iloc[0].loc['C']\n result = df.groupby('A').apply(f)\n e = df.groupby('A').first()['C'].copy()\n e.loc['Pony'] = np.nan\n e.name = None\n assert_series_equal(result,e)\n\n def test_agg_api(self):\n\n # GH 6337\n # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error\n # different api for agg when passed custom function with mixed frame\n\n df = DataFrame({'data1':np.random.randn(5),\n 'data2':np.random.randn(5),\n 'key1':['a','a','b','b','a'],\n 'key2':['one','two','one','two','one']})\n grouped = df.groupby('key1')\n\n def peak_to_peak(arr):\n return arr.max() - 
arr.min()\n\n expected = grouped.agg([peak_to_peak])\n expected.columns=['data1','data2']\n result = grouped.agg(peak_to_peak)\n assert_frame_equal(result,expected)\n\n def test_agg_regression1(self):\n grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n def test_agg_datetimes_mixed(self):\n data = [[1, '2012-01-01', 1.0],\n [2, '2012-01-02', 2.0],\n [3, None, 3.0]]\n\n df1 = DataFrame({'key': [x[0] for x in data],\n 'date': [x[1] for x in data],\n 'value': [x[2] for x in data]})\n\n data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date()\n if row[1] else None, row[2]] for row in data]\n\n df2 = DataFrame({'key': [x[0] for x in data],\n 'date': [x[1] for x in data],\n 'value': [x[2] for x in data]})\n\n df1['weights'] = df1['value'] / df1['value'].sum()\n gb1 = df1.groupby('date').aggregate(np.sum)\n\n df2['weights'] = df1['value'] / df1['value'].sum()\n gb2 = df2.groupby('date').aggregate(np.sum)\n\n assert(len(gb1) == len(gb2))\n\n def test_agg_period_index(self):\n from pandas import period_range, PeriodIndex\n prng = period_range('2012-1-1', freq='M', periods=3)\n df = DataFrame(np.random.randn(3, 2), index=prng)\n rs = df.groupby(level=0).sum()\n tm.assert_isinstance(rs.index, PeriodIndex)\n\n # GH 3579\n index = period_range(start='1999-01', periods=5, freq='M')\n s1 = Series(np.random.rand(len(index)), index=index)\n s2 = Series(np.random.rand(len(index)), index=index)\n series = [('s1', s1), ('s2',s2)]\n df = DataFrame.from_items(series)\n grouped = df.groupby(df.index.month)\n list(grouped)\n\n def test_agg_must_agg(self):\n grouped = self.df.groupby('A')['C']\n self.assertRaises(Exception, grouped.agg, lambda x: x.describe())\n self.assertRaises(Exception, grouped.agg, lambda x: x.index[:2])\n\n def test_agg_ser_multi_key(self):\n ser = self.df.C\n f = lambda x: x.sum()\n results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)\n expected = self.df.groupby(['A', 'B']).sum()['C']\n assert_series_equal(results, expected)\n\n def test_get_group(self):\n wp = tm.makePanel()\n grouped = wp.groupby(lambda x: x.month, axis='major')\n\n gp = grouped.get_group(1)\n expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1])\n assert_panel_equal(gp, expected)\n\n\n # GH 5267\n # be datelike friendly\n df = DataFrame({'DATE' : pd.to_datetime(['10-Oct-2013', '10-Oct-2013', '10-Oct-2013',\n '11-Oct-2013', '11-Oct-2013', '11-Oct-2013']),\n 'label' : ['foo','foo','bar','foo','foo','bar'],\n 'VAL' : [1,2,3,4,5,6]})\n\n g = df.groupby('DATE')\n key = list(g.groups)[0]\n result1 = g.get_group(key)\n result2 = g.get_group(Timestamp(key).to_datetime())\n result3 = g.get_group(str(Timestamp(key)))\n assert_frame_equal(result1,result2)\n assert_frame_equal(result1,result3)\n\n g = df.groupby(['DATE','label'])\n\n key = list(g.groups)[0]\n result1 = g.get_group(key)\n result2 = g.get_group((Timestamp(key[0]).to_datetime(),key[1]))\n result3 = g.get_group((str(Timestamp(key[0])),key[1]))\n assert_frame_equal(result1,result2)\n assert_frame_equal(result1,result3)\n\n # must pass a same-length tuple with multiple keys\n self.assertRaises(ValueError, lambda : g.get_group('foo'))\n self.assertRaises(ValueError, lambda : g.get_group(('foo')))\n self.assertRaises(ValueError, lambda : g.get_group(('foo','bar','baz')))\n\n def test_get_group_grouped_by_tuple(self):\n # GH 8121\n df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]],\n index=['ids']).T\n gr = 
df.groupby('ids')\n expected = DataFrame({'ids': [(1,), (1,)]}, index=[0, 2])\n result = gr.get_group((1,))\n assert_frame_equal(result, expected)\n\n dt = pd.to_datetime(['2010-01-01', '2010-01-02', '2010-01-01',\n '2010-01-02'])\n df = DataFrame({'ids': [(x,) for x in dt]})\n gr = df.groupby('ids')\n result = gr.get_group(('2010-01-01',))\n expected = DataFrame({'ids': [(dt[0],), (dt[0],)]}, index=[0, 2])\n assert_frame_equal(result, expected)\n\n def test_agg_apply_corner(self):\n # nothing to group, all NA\n grouped = self.ts.groupby(self.ts * np.nan)\n\n assert_series_equal(grouped.sum(), Series([]))\n assert_series_equal(grouped.agg(np.sum), Series([]))\n assert_series_equal(grouped.apply(np.sum), Series([]))\n\n # DataFrame\n grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)\n exp_df = DataFrame(columns=self.tsframe.columns, dtype=float)\n assert_frame_equal(grouped.sum(), exp_df, check_names=False)\n assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)\n assert_frame_equal(grouped.apply(np.sum), DataFrame({}, dtype=float))\n\n def test_agg_grouping_is_list_tuple(self):\n from pandas.core.groupby import Grouping\n\n df = tm.makeTimeDataFrame()\n\n grouped = df.groupby(lambda x: x.year)\n grouper = grouped.grouper.groupings[0].grouper\n grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n def test_grouping_error_on_multidim_input(self):\n from pandas.core.groupby import Grouping\n self.assertRaises(ValueError, \\\n Grouping, self.df.index, self.df[['A','A']])\n\n def test_agg_python_multiindex(self):\n grouped = self.mframe.groupby(['A', 'B'])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n def test_apply_describe_bug(self):\n grouped = self.mframe.groupby(level='first')\n result = grouped.describe() # it works!\n\n def test_apply_issues(self):\n # GH 5788\n\n s=\"\"\"2011.05.16,00:00,1.40893\n2011.05.16,01:00,1.40760\n2011.05.16,02:00,1.40750\n2011.05.16,03:00,1.40649\n2011.05.17,02:00,1.40893\n2011.05.17,03:00,1.40760\n2011.05.17,04:00,1.40750\n2011.05.17,05:00,1.40649\n2011.05.18,02:00,1.40893\n2011.05.18,03:00,1.40760\n2011.05.18,04:00,1.40750\n2011.05.18,05:00,1.40649\"\"\"\n\n df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'], parse_dates=[['date', 'time']])\n df = df.set_index('date_time')\n\n expected = df.groupby(df.index.date).idxmax()\n result = df.groupby(df.index.date).apply(lambda x: x.idxmax())\n assert_frame_equal(result,expected)\n\n # GH 5789\n # don't auto coerce dates\n df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'])\n expected = Series(['00:00','02:00','02:00'],index=['2011.05.16','2011.05.17','2011.05.18'])\n result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()])\n assert_series_equal(result,expected)\n\n def test_len(self):\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year,\n lambda x: x.month,\n lambda x: x.day])\n self.assertEqual(len(grouped), len(df))\n\n grouped = df.groupby([lambda x: x.year,\n lambda x: x.month])\n expected = len(set([(x.year, x.month) for x in df.index]))\n self.assertEqual(len(grouped), expected)\n\n def test_groups(self):\n grouped = self.df.groupby(['A'])\n 
groups = grouped.groups\n self.assertIs(groups, grouped.groups) # caching works\n\n for k, v in compat.iteritems(grouped.groups):\n self.assertTrue((self.df.ix[v]['A'] == k).all())\n\n grouped = self.df.groupby(['A', 'B'])\n groups = grouped.groups\n self.assertIs(groups, grouped.groups) # caching works\n for k, v in compat.iteritems(grouped.groups):\n self.assertTrue((self.df.ix[v]['A'] == k[0]).all())\n self.assertTrue((self.df.ix[v]['B'] == k[1]).all())\n\n def test_aggregate_str_func(self):\n\n def _check_results(grouped):\n # single series\n result = grouped['A'].agg('std')\n expected = grouped['A'].std()\n assert_series_equal(result, expected)\n\n # group frame by function name\n result = grouped.aggregate('var')\n expected = grouped.var()\n assert_frame_equal(result, expected)\n\n # group frame by function dict\n result = grouped.agg(OrderedDict([['A', 'var'],\n ['B', 'std'],\n ['C', 'mean'],\n ['D', 'sem']]))\n expected = DataFrame(OrderedDict([['A', grouped['A'].var()],\n ['B', grouped['B'].std()],\n ['C', grouped['C'].mean()],\n ['D', grouped['D'].sem()]]))\n assert_frame_equal(result, expected)\n\n by_weekday = self.tsframe.groupby(lambda x: x.weekday())\n _check_results(by_weekday)\n\n by_mwkday = self.tsframe.groupby([lambda x: x.month,\n lambda x: x.weekday()])\n _check_results(by_mwkday)\n\n def test_aggregate_item_by_item(self):\n\n df = self.df.copy()\n df['E'] = ['a'] * len(self.df)\n grouped = self.df.groupby('A')\n\n # API change in 0.11\n # def aggfun(ser):\n # return len(ser + 'a')\n # result = grouped.agg(aggfun)\n # self.assertEqual(len(result.columns), 1)\n\n aggfun = lambda ser: ser.size\n result = grouped.agg(aggfun)\n foo = (self.df.A == 'foo').sum()\n bar = (self.df.A == 'bar').sum()\n K = len(result.columns)\n\n # GH5782\n # odd comparisons can result here, so cast to make easy\n assert_almost_equal(result.xs('foo'), np.array([foo] * K).astype('float64'))\n assert_almost_equal(result.xs('bar'), np.array([bar] * K).astype('float64'))\n\n def aggfun(ser):\n return ser.size\n result = DataFrame().groupby(self.df.A).agg(aggfun)\n tm.assert_isinstance(result, DataFrame)\n self.assertEqual(len(result), 0)\n\n def test_agg_item_by_item_raise_typeerror(self):\n from numpy.random import randint\n\n df = DataFrame(randint(10, size=(20, 10)))\n\n def raiseException(df):\n com.pprint_thing('----------------------------------------')\n com.pprint_thing(df.to_string())\n raise TypeError\n\n self.assertRaises(TypeError, df.groupby(0).agg,\n raiseException)\n\n def test_basic_regression(self):\n # regression\n T = [1.0 * x for x in lrange(1, 10) * 10][:1095]\n result = Series(T, lrange(0, len(T)))\n\n groupings = np.random.random((1100,))\n groupings = Series(groupings, lrange(0, len(groupings))) * 10.\n\n grouped = result.groupby(groupings)\n grouped.mean()\n\n def test_transform(self):\n data = Series(np.arange(9) // 3, index=np.arange(9))\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n transformed = grouped.transform(lambda x: x * x.sum())\n self.assertEqual(transformed[7], 12)\n\n # GH 8046\n # make sure that we preserve the input order\n\n df = DataFrame(np.arange(6,dtype='int64').reshape(3,2), columns=[\"a\",\"b\"], index=[0,2,1])\n key = [0,0,1]\n expected = df.sort_index().groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()\n result = df.groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()\n assert_frame_equal(result, expected)\n\n def demean(arr):\n return arr - 
arr.mean()\n\n people = DataFrame(np.random.randn(5, 5),\n columns=['a', 'b', 'c', 'd', 'e'],\n index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])\n key = ['one', 'two', 'one', 'two', 'one']\n result = people.groupby(key).transform(demean).groupby(key).mean()\n expected = people.groupby(key).apply(demean).groupby(key).mean()\n assert_frame_equal(result, expected)\n\n # GH 8430\n df = tm.makeTimeDataFrame()\n g = df.groupby(pd.TimeGrouper('M'))\n g.transform(lambda x: x-1)\n\n def test_transform_fast(self):\n\n df = DataFrame( { 'id' : np.arange( 100000 ) / 3,\n 'val': np.random.randn( 100000) } )\n\n grp=df.groupby('id')['val']\n\n values = np.repeat(grp.mean().values, com._ensure_platform_int(grp.count().values))\n expected = pd.Series(values,index=df.index)\n result = grp.transform(np.mean)\n assert_series_equal(result,expected)\n\n result = grp.transform('mean')\n assert_series_equal(result,expected)\n\n def test_transform_broadcast(self):\n grouped = self.ts.groupby(lambda x: x.month)\n result = grouped.transform(np.mean)\n\n self.assertTrue(result.index.equals(self.ts.index))\n for _, gp in grouped:\n assert_fp_equal(result.reindex(gp.index), gp.mean())\n\n grouped = self.tsframe.groupby(lambda x: x.month)\n result = grouped.transform(np.mean)\n self.assertTrue(result.index.equals(self.tsframe.index))\n for _, gp in grouped:\n agged = gp.mean()\n res = result.reindex(gp.index)\n for col in self.tsframe:\n assert_fp_equal(res[col], agged[col])\n\n # group columns\n grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},\n axis=1)\n result = grouped.transform(np.mean)\n self.assertTrue(result.index.equals(self.tsframe.index))\n self.assertTrue(result.columns.equals(self.tsframe.columns))\n for _, gp in grouped:\n agged = gp.mean(1)\n res = result.reindex(columns=gp.columns)\n for idx in gp.index:\n assert_fp_equal(res.xs(idx), agged[idx])\n\n def test_transform_bug(self):\n # GH 5712\n # transforming on a datetime column\n df = DataFrame(dict(A = Timestamp('20130101'), B = np.arange(5)))\n result = df.groupby('A')['B'].transform(lambda x: x.rank(ascending=False))\n expected = Series(np.arange(5,0,step=-1),name='B')\n assert_series_equal(result,expected)\n\n def test_transform_multiple(self):\n grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])\n\n transformed = grouped.transform(lambda x: x * 2)\n broadcasted = grouped.transform(np.mean)\n\n def test_dispatch_transform(self):\n df = self.tsframe[::5].reindex(self.tsframe.index)\n\n grouped = df.groupby(lambda x: x.month)\n\n filled = grouped.fillna(method='pad')\n fillit = lambda x: x.fillna(method='pad')\n expected = df.groupby(lambda x: x.month).transform(fillit)\n assert_frame_equal(filled, expected)\n\n def test_transform_select_columns(self):\n f = lambda x: x.mean()\n result = self.df.groupby('A')['C', 'D'].transform(f)\n\n selection = self.df[['C', 'D']]\n expected = selection.groupby(self.df['A']).transform(f)\n\n assert_frame_equal(result, expected)\n\n def test_transform_exclude_nuisance(self):\n\n # this also tests orderings in transform between\n # series/frame to make sure its consistent\n expected = {}\n grouped = self.df.groupby('A')\n expected['C'] = grouped['C'].transform(np.mean)\n expected['D'] = grouped['D'].transform(np.mean)\n expected = DataFrame(expected)\n result = self.df.groupby('A').transform(np.mean)\n\n assert_frame_equal(result, expected)\n\n def test_transform_function_aliases(self):\n result = self.df.groupby('A').transform('mean')\n expected = 
self.df.groupby('A').transform(np.mean)\n assert_frame_equal(result, expected)\n\n result = self.df.groupby('A')['C'].transform('mean')\n expected = self.df.groupby('A')['C'].transform(np.mean)\n assert_series_equal(result, expected)\n\n def test_with_na(self):\n index = Index(np.arange(10))\n\n for dtype in ['float64','float32','int64','int32','int16','int8']:\n values = Series(np.ones(10), index, dtype=dtype)\n labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',\n 'bar', nan, 'foo'], index=index)\n\n\n # this SHOULD be an int\n grouped = values.groupby(labels)\n agged = grouped.agg(len)\n expected = Series([4, 2], index=['bar', 'foo'])\n\n assert_series_equal(agged, expected, check_dtype=False)\n #self.assertTrue(issubclass(agged.dtype.type, np.integer))\n\n # explicity return a float from my function\n def f(x):\n return float(len(x))\n\n agged = grouped.agg(f)\n expected = Series([4, 2], index=['bar', 'foo'])\n\n assert_series_equal(agged, expected, check_dtype=False)\n self.assertTrue(issubclass(agged.dtype.type, np.dtype(dtype).type))\n\n def test_groupby_transform_with_int(self):\n\n # GH 3740, make sure that we might upcast on item-by-item transform\n\n # floats\n df = DataFrame(dict(A = [1,1,1,2,2,2], B = Series(1,dtype='float64'), C = Series([1,2,3,1,2,3],dtype='float64'), D = 'foo'))\n result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())\n expected = DataFrame(dict(B = np.nan, C = Series([-1,0,1,-1,0,1],dtype='float64')))\n assert_frame_equal(result,expected)\n\n # int case\n df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = [1,2,3,1,2,3], D = 'foo'))\n result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())\n expected = DataFrame(dict(B = np.nan, C = [-1,0,1,-1,0,1]))\n assert_frame_equal(result,expected)\n\n # int that needs float conversion\n s = Series([2,3,4,10,5,-1])\n df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = s, D = 'foo'))\n result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())\n\n s1 = s.iloc[0:3]\n s1 = (s1-s1.mean())/s1.std()\n s2 = s.iloc[3:6]\n s2 = (s2-s2.mean())/s2.std()\n expected = DataFrame(dict(B = np.nan, C = concat([s1,s2])))\n assert_frame_equal(result,expected)\n\n # int downcasting\n result = df.groupby('A').transform(lambda x: x*2/2)\n expected = DataFrame(dict(B = 1, C = [2,3,4,10,5,-1]))\n assert_frame_equal(result,expected)\n\n def test_indices_concatenation_order(self):\n\n # GH 2808\n\n def f1(x):\n y = x[(x.b % 2) == 1]**2\n if y.empty:\n multiindex = MultiIndex(\n levels = [[]]*2,\n labels = [[]]*2,\n names = ['b', 'c']\n )\n res = DataFrame(None,\n columns=['a'],\n index=multiindex)\n return res\n else:\n y = y.set_index(['b','c'])\n return y\n\n def f2(x):\n y = x[(x.b % 2) == 1]**2\n if y.empty:\n return DataFrame()\n else:\n y = y.set_index(['b','c'])\n return y\n\n def f3(x):\n y = x[(x.b % 2) == 1]**2\n if y.empty:\n multiindex = MultiIndex(\n levels = [[]]*2,\n labels = [[]]*2,\n names = ['foo', 'bar']\n )\n res = DataFrame(None,\n columns=['a','b'],\n index=multiindex)\n return res\n else:\n return y\n\n df = DataFrame({'a':[1,2,2,2],\n 'b':lrange(4),\n 'c':lrange(5,9)})\n\n df2 = DataFrame({'a':[3,2,2,2],\n 'b':lrange(4),\n 'c':lrange(5,9)})\n\n\n # correct result\n result1 = df.groupby('a').apply(f1)\n result2 = df2.groupby('a').apply(f1)\n assert_frame_equal(result1, result2)\n\n # should fail (not the same number of levels)\n self.assertRaises(AssertionError, df.groupby('a').apply, f2)\n self.assertRaises(AssertionError, df2.groupby('a').apply, f2)\n\n # should fail (incorrect 
shape)\n self.assertRaises(AssertionError, df.groupby('a').apply, f3)\n self.assertRaises(AssertionError, df2.groupby('a').apply, f3)\n\n def test_attr_wrapper(self):\n grouped = self.ts.groupby(lambda x: x.weekday())\n\n result = grouped.std()\n expected = grouped.agg(lambda x: np.std(x, ddof=1))\n assert_series_equal(result, expected)\n\n # this is pretty cool\n result = grouped.describe()\n expected = {}\n for name, gp in grouped:\n expected[name] = gp.describe()\n expected = DataFrame(expected).T\n assert_frame_equal(result.unstack(), expected)\n\n # get attribute\n result = grouped.dtype\n expected = grouped.agg(lambda x: x.dtype)\n\n # make sure raises error\n self.assertRaises(AttributeError, getattr, grouped, 'foo')\n\n def test_series_describe_multikey(self):\n ts = tm.makeTimeSeries()\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n result = grouped.describe().unstack()\n assert_series_equal(result['mean'], grouped.mean())\n assert_series_equal(result['std'], grouped.std())\n assert_series_equal(result['min'], grouped.min())\n\n def test_series_describe_single(self):\n ts = tm.makeTimeSeries()\n grouped = ts.groupby(lambda x: x.month)\n result = grouped.apply(lambda x: x.describe())\n expected = grouped.describe()\n assert_series_equal(result, expected)\n\n def test_series_agg_multikey(self):\n ts = tm.makeTimeSeries()\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n assert_series_equal(result, expected)\n\n def test_series_agg_multi_pure_python(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n def bad(x):\n assert(len(x.base) > 0)\n return 'foo'\n\n result = data.groupby(['A', 'B']).agg(bad)\n expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')\n assert_frame_equal(result, expected)\n\n def test_series_index_name(self):\n grouped = self.df.ix[:, ['C']].groupby(self.df['A'])\n result = grouped.agg(lambda x: x.mean())\n self.assertEqual(result.index.name, 'A')\n\n def test_frame_describe_multikey(self):\n grouped = self.tsframe.groupby([lambda x: x.year,\n lambda x: x.month])\n result = grouped.describe()\n\n for col in self.tsframe:\n expected = grouped[col].describe()\n assert_series_equal(result[col], expected)\n\n groupedT = self.tsframe.groupby({'A': 0, 'B': 0,\n 'C': 1, 'D': 1}, axis=1)\n result = groupedT.describe()\n\n for name, group in groupedT:\n assert_frame_equal(result[name], group.describe())\n\n def test_frame_groupby(self):\n grouped = self.tsframe.groupby(lambda x: x.weekday())\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n self.assertEqual(len(aggregated), 5)\n self.assertEqual(len(aggregated.columns), 4)\n\n # by string\n tscopy = self.tsframe.copy()\n tscopy['weekday'] = [x.weekday() for x in tscopy.index]\n stragged = tscopy.groupby('weekday').aggregate(np.mean)\n assert_frame_equal(stragged, aggregated, check_names=False)\n\n # transform\n grouped = self.tsframe.head(30).groupby(lambda x: x.weekday())\n transformed = grouped.transform(lambda x: x - x.mean())\n self.assertEqual(len(transformed), 30)\n self.assertEqual(len(transformed.columns), 4)\n\n # transform propagate\n transformed = 
grouped.transform(lambda x: x.mean())\n for name, group in grouped:\n mean = group.mean()\n for idx in group.index:\n assert_almost_equal(transformed.xs(idx), mean)\n\n # iterate\n for weekday, group in grouped:\n self.assertEqual(group.index[0].weekday(), weekday)\n\n # groups / group_indices\n groups = grouped.groups\n indices = grouped.indices\n\n for k, v in compat.iteritems(groups):\n samething = self.tsframe.index.take(indices[k])\n self.assertTrue((samething == v).all())\n\n def test_grouping_is_iterable(self):\n # this code path isn't used anywhere else\n # not sure it's useful\n grouped = self.tsframe.groupby([lambda x: x.weekday(),\n lambda x: x.year])\n\n # test it works\n for g in grouped.grouper.groupings[0]:\n pass\n\n def test_frame_groupby_columns(self):\n mapping = {\n 'A': 0, 'B': 0, 'C': 1, 'D': 1\n }\n grouped = self.tsframe.groupby(mapping, axis=1)\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n self.assertEqual(len(aggregated), len(self.tsframe))\n self.assertEqual(len(aggregated.columns), 2)\n\n # transform\n tf = lambda x: x - x.mean()\n groupedT = self.tsframe.T.groupby(mapping, axis=0)\n assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))\n\n # iterate\n for k, v in grouped:\n self.assertEqual(len(v.columns), 2)\n\n def test_frame_set_name_single(self):\n grouped = self.df.groupby('A')\n\n result = grouped.mean()\n self.assertEqual(result.index.name, 'A')\n\n result = self.df.groupby('A', as_index=False).mean()\n self.assertNotEqual(result.index.name, 'A')\n\n result = grouped.agg(np.mean)\n self.assertEqual(result.index.name, 'A')\n\n result = grouped.agg({'C': np.mean, 'D': np.std})\n self.assertEqual(result.index.name, 'A')\n\n result = grouped['C'].mean()\n self.assertEqual(result.index.name, 'A')\n result = grouped['C'].agg(np.mean)\n self.assertEqual(result.index.name, 'A')\n result = grouped['C'].agg([np.mean, np.std])\n self.assertEqual(result.index.name, 'A')\n\n result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})\n self.assertEqual(result.index.name, 'A')\n\n def test_multi_iter(self):\n s = Series(np.arange(6))\n k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])\n k2 = np.array(['1', '2', '1', '2', '1', '2'])\n\n grouped = s.groupby([k1, k2])\n\n iterated = list(grouped)\n expected = [('a', '1', s[[0, 2]]),\n ('a', '2', s[[1]]),\n ('b', '1', s[[4]]),\n ('b', '2', s[[3, 5]])]\n for i, ((one, two), three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n self.assertEqual(e1, one)\n self.assertEqual(e2, two)\n assert_series_equal(three, e3)\n\n def test_multi_iter_frame(self):\n k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])\n k2 = np.array(['1', '2', '1', '2', '1', '2'])\n df = DataFrame({'v1': np.random.randn(6),\n 'v2': np.random.randn(6),\n 'k1': k1, 'k2': k2},\n index=['one', 'two', 'three', 'four', 'five', 'six'])\n\n grouped = df.groupby(['k1', 'k2'])\n\n # things get sorted!\n iterated = list(grouped)\n idx = df.index\n expected = [('a', '1', df.ix[idx[[4]]]),\n ('a', '2', df.ix[idx[[3, 5]]]),\n ('b', '1', df.ix[idx[[0, 2]]]),\n ('b', '2', df.ix[idx[[1]]])]\n for i, ((one, two), three) in enumerate(iterated):\n e1, e2, e3 = expected[i]\n self.assertEqual(e1, one)\n self.assertEqual(e2, two)\n assert_frame_equal(three, e3)\n\n # don't iterate through groups with no data\n df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])\n df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])\n grouped = df.groupby(['k1', 'k2'])\n groups = {}\n for key, gp in grouped:\n groups[key] = gp\n self.assertEqual(len(groups), 2)\n\n # axis = 
1\n three_levels = self.three_group.groupby(['A', 'B', 'C']).mean()\n grouped = three_levels.T.groupby(axis=1, level=(1, 2))\n for key, group in grouped:\n pass\n\n def test_multi_iter_panel(self):\n wp = tm.makePanel()\n grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()],\n axis=1)\n\n for (month, wd), group in grouped:\n exp_axis = [x for x in wp.major_axis\n if x.month == month and x.weekday() == wd]\n expected = wp.reindex(major=exp_axis)\n assert_panel_equal(group, expected)\n\n def test_multi_func(self):\n col1 = self.df['A']\n col2 = self.df['B']\n\n grouped = self.df.groupby([col1.get, col2.get])\n agged = grouped.mean()\n expected = self.df.groupby(['A', 'B']).mean()\n assert_frame_equal(agged.ix[:, ['C', 'D']],\n expected.ix[:, ['C', 'D']],\n check_names=False) # TODO groupby get drops names\n\n # some \"groups\" with no data\n df = DataFrame({'v1': np.random.randn(6),\n 'v2': np.random.randn(6),\n 'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),\n 'k2': np.array(['1', '1', '1', '2', '2', '2'])},\n index=['one', 'two', 'three', 'four', 'five', 'six'])\n # only verify that it works for now\n grouped = df.groupby(['k1', 'k2'])\n grouped.agg(np.sum)\n\n def test_multi_key_multiple_functions(self):\n grouped = self.df.groupby(['A', 'B'])['C']\n\n agged = grouped.agg([np.mean, np.std])\n expected = DataFrame({'mean': grouped.agg(np.mean),\n 'std': grouped.agg(np.std)})\n assert_frame_equal(agged, expected)\n\n def test_frame_multi_key_function_list(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n grouped = data.groupby(['A', 'B'])\n funcs = [np.mean, np.std]\n agged = grouped.agg(funcs)\n expected = concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),\n grouped['F'].agg(funcs)],\n keys=['D', 'E', 'F'], axis=1)\n assert(isinstance(agged.index, MultiIndex))\n assert(isinstance(expected.index, MultiIndex))\n assert_frame_equal(agged, expected)\n\n def test_groupby_multiple_columns(self):\n data = self.df\n grouped = data.groupby(['A', 'B'])\n\n def _check_op(op):\n\n result1 = op(grouped)\n\n expected = defaultdict(dict)\n for n1, gp1 in data.groupby('A'):\n for n2, gp2 in gp1.groupby('B'):\n expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])\n expected = dict((k, DataFrame(v)) for k, v in compat.iteritems(expected))\n expected = Panel.fromDict(expected).swapaxes(0, 1)\n expected.major_axis.name, expected.minor_axis.name = 'A', 'B'\n\n # a little bit crude\n for col in ['C', 'D']:\n result_col = op(grouped[col])\n exp = expected[col]\n pivoted = result1[col].unstack()\n pivoted2 = result_col.unstack()\n assert_frame_equal(pivoted.reindex_like(exp), exp)\n assert_frame_equal(pivoted2.reindex_like(exp), exp)\n\n _check_op(lambda x: x.sum())\n _check_op(lambda x: x.mean())\n\n # test single series works the same\n result = data['C'].groupby([data['A'], data['B']]).mean()\n expected = data.groupby(['A', 'B']).mean()['C']\n\n assert_series_equal(result, expected)\n\n def test_groupby_as_index_agg(self):\n grouped = self.df.groupby('A', as_index=False)\n\n # single-key\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', 
np.sum]]))\n expected2 = grouped.mean()\n expected2['D'] = grouped.sum()['D']\n assert_frame_equal(result2, expected2)\n\n grouped = self.df.groupby('A', as_index=True)\n expected3 = grouped['C'].sum()\n expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})\n result3 = grouped['C'].agg({'Q': np.sum})\n assert_frame_equal(result3, expected3)\n\n # multi-key\n\n grouped = self.df.groupby(['A', 'B'], as_index=False)\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))\n expected2 = grouped.mean()\n expected2['D'] = grouped.sum()['D']\n assert_frame_equal(result2, expected2)\n\n expected3 = grouped['C'].sum()\n expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})\n result3 = grouped['C'].agg({'Q': np.sum})\n assert_frame_equal(result3, expected3)\n\n # GH7115 & GH8112 & GH8582\n df = DataFrame(np.random.randint(0, 100, (50, 3)),\n columns=['jim', 'joe', 'jolie'])\n ts = Series(np.random.randint(5, 10, 50), name='jim')\n\n gr = df.groupby(ts)\n _ = gr.nth(0) # invokes _set_selection_from_grouper internally\n assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))\n\n for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:\n gr = df.groupby(ts, as_index=False)\n left = getattr(gr, attr)()\n\n gr = df.groupby(ts.values, as_index=True)\n right = getattr(gr, attr)().reset_index(drop=True)\n\n assert_frame_equal(left, right)\n\n def test_mulitindex_passthru(self):\n\n # GH 7997\n # regression from 0.14.1\n df = pd.DataFrame([[1,2,3],[4,5,6],[7,8,9]])\n df.columns = pd.MultiIndex.from_tuples([(0,1),(1,1),(2,1)])\n\n result = df.groupby(axis=1, level=[0,1]).first()\n assert_frame_equal(result, df)\n\n def test_multifunc_select_col_integer_cols(self):\n df = self.df\n df.columns = np.arange(len(df.columns))\n\n # it works!\n result = df.groupby(1, as_index=False)[2].agg({'Q': np.mean})\n\n def test_as_index_series_return_frame(self):\n grouped = self.df.groupby('A', as_index=False)\n grouped2 = self.df.groupby(['A', 'B'], as_index=False)\n\n result = grouped['C'].agg(np.sum)\n expected = grouped.agg(np.sum).ix[:, ['A', 'C']]\n tm.assert_isinstance(result, DataFrame)\n assert_frame_equal(result, expected)\n\n result2 = grouped2['C'].agg(np.sum)\n expected2 = grouped2.agg(np.sum).ix[:, ['A', 'B', 'C']]\n tm.assert_isinstance(result2, DataFrame)\n assert_frame_equal(result2, expected2)\n\n result = grouped['C'].sum()\n expected = grouped.sum().ix[:, ['A', 'C']]\n tm.assert_isinstance(result, DataFrame)\n assert_frame_equal(result, expected)\n\n result2 = grouped2['C'].sum()\n expected2 = grouped2.sum().ix[:, ['A', 'B', 'C']]\n tm.assert_isinstance(result2, DataFrame)\n assert_frame_equal(result2, expected2)\n\n # corner case\n self.assertRaises(Exception, grouped['C'].__getitem__,\n 'D')\n\n def test_groupby_as_index_cython(self):\n data = self.df\n\n # single-key\n grouped = data.groupby('A', as_index=False)\n result = grouped.mean()\n expected = data.groupby(['A']).mean()\n expected.insert(0, 'A', expected.index)\n expected.index = np.arange(len(expected))\n assert_frame_equal(result, expected)\n\n # multi-key\n grouped = data.groupby(['A', 'B'], as_index=False)\n result = grouped.mean()\n expected = data.groupby(['A', 'B']).mean()\n\n arrays = lzip(*expected.index._tuple_index)\n expected.insert(0, 'A', arrays[0])\n expected.insert(1, 'B', arrays[1])\n expected.index = np.arange(len(expected))\n assert_frame_equal(result, expected)\n\n def 
test_groupby_as_index_series_scalar(self):\n grouped = self.df.groupby(['A', 'B'], as_index=False)\n\n # GH #421\n\n result = grouped['C'].agg(len)\n expected = grouped.agg(len).ix[:, ['A', 'B', 'C']]\n assert_frame_equal(result, expected)\n\n def test_groupby_as_index_corner(self):\n self.assertRaises(TypeError, self.ts.groupby,\n lambda x: x.weekday(), as_index=False)\n\n self.assertRaises(ValueError, self.df.groupby,\n lambda x: x.lower(), as_index=False, axis=1)\n\n def test_groupby_as_index_apply(self):\n # GH #4648 and #3417\n df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],\n 'user_id': [1,2,1,1,3,1],\n 'time': range(6)})\n\n g_as = df.groupby('user_id', as_index=True)\n g_not_as = df.groupby('user_id', as_index=False)\n\n res_as = g_as.head(2).index\n res_not_as = g_not_as.head(2).index\n exp = Index([0, 1, 2, 4])\n assert_index_equal(res_as, exp)\n assert_index_equal(res_not_as, exp)\n\n res_as_apply = g_as.apply(lambda x: x.head(2)).index\n res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index\n\n # apply doesn't maintain the original ordering\n # changed in GH5610 as the as_index=False returns a MI here\n exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])\n exp_as_apply = MultiIndex.from_tuples([(1, 0), (1, 2), (2, 1), (3, 4)])\n\n assert_index_equal(res_as_apply, exp_as_apply)\n assert_index_equal(res_not_as_apply, exp_not_as_apply)\n\n ind = Index(list('abcde'))\n df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)\n res = df.groupby(0, as_index=False).apply(lambda x: x).index\n assert_index_equal(res, ind)\n\n def test_groupby_head_tail(self):\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])\n g_as = df.groupby('A', as_index=True)\n g_not_as = df.groupby('A', as_index=False)\n\n # as_index= False, much easier\n assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))\n assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))\n\n empty_not_as = DataFrame(columns=df.columns)\n assert_frame_equal(empty_not_as, g_not_as.head(0))\n assert_frame_equal(empty_not_as, g_not_as.tail(0))\n assert_frame_equal(empty_not_as, g_not_as.head(-1))\n assert_frame_equal(empty_not_as, g_not_as.tail(-1))\n\n assert_frame_equal(df, g_not_as.head(7)) # contains all\n assert_frame_equal(df, g_not_as.tail(7))\n\n # as_index=True, (used to be different)\n df_as = df\n\n assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))\n assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))\n\n empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)\n assert_frame_equal(empty_as, g_as.head(0))\n assert_frame_equal(empty_as, g_as.tail(0))\n assert_frame_equal(empty_as, g_as.head(-1))\n assert_frame_equal(empty_as, g_as.tail(-1))\n\n assert_frame_equal(df_as, g_as.head(7)) # contains all\n assert_frame_equal(df_as, g_as.tail(7))\n\n # test with selection\n assert_frame_equal(g_as[[]].head(1), df_as.loc[[0,2], []])\n assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0,2], ['A']])\n assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0,2], ['B']])\n assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0,2]])\n\n assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0,2], []])\n assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0,2], ['A']])\n assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0,2], ['B']])\n assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0,2]])\n\n def test_groupby_multiple_key(self):\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year,\n lambda x: x.month,\n lambda x: x.day])\n agged = 
grouped.sum()\n assert_almost_equal(df.values, agged.values)\n\n grouped = df.T.groupby([lambda x: x.year,\n lambda x: x.month,\n lambda x: x.day], axis=1)\n\n agged = grouped.agg(lambda x: x.sum())\n self.assertTrue(agged.index.equals(df.columns))\n assert_almost_equal(df.T.values, agged.values)\n\n agged = grouped.agg(lambda x: x.sum())\n assert_almost_equal(df.T.values, agged.values)\n\n def test_groupby_multi_corner(self):\n # test that having an all-NA column doesn't mess you up\n df = self.df.copy()\n df['bad'] = np.nan\n agged = df.groupby(['A', 'B']).mean()\n\n expected = self.df.groupby(['A', 'B']).mean()\n expected['bad'] = np.nan\n\n assert_frame_equal(agged, expected)\n\n def test_omit_nuisance(self):\n grouped = self.df.groupby('A')\n\n result = grouped.mean()\n expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()\n assert_frame_equal(result, expected)\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n assert_frame_equal(agged, exp)\n\n df = self.df.ix[:, ['A', 'C', 'D']]\n df['E'] = datetime.now()\n grouped = df.groupby('A')\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n assert_frame_equal(result, expected)\n\n # won't work with axis = 1\n grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)\n result = self.assertRaises(TypeError, grouped.agg,\n lambda x: x.sum(0, numeric_only=False))\n\n def test_omit_nuisance_python_multiple(self):\n grouped = self.three_group.groupby(['A', 'B'])\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n assert_frame_equal(agged, exp)\n\n def test_empty_groups_corner(self):\n # handle empty groups\n df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),\n 'k2': np.array(['1', '1', '1', '2', '2', '2']),\n 'k3': ['foo', 'bar'] * 3,\n 'v1': np.random.randn(6),\n 'v2': np.random.randn(6)})\n\n grouped = df.groupby(['k1', 'k2'])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n grouped = self.mframe[3:5].groupby(level=0)\n agged = grouped.apply(lambda x: x.mean())\n agged_A = grouped['A'].apply(np.mean)\n assert_series_equal(agged['A'], agged_A)\n self.assertEqual(agged.index.name, 'first')\n\n def test_apply_concat_preserve_names(self):\n grouped = self.three_group.groupby(['A', 'B'])\n\n def desc(group):\n result = group.describe()\n result.index.name = 'stat'\n return result\n\n def desc2(group):\n result = group.describe()\n result.index.name = 'stat'\n result = result[:len(group)]\n # weirdo\n return result\n\n def desc3(group):\n result = group.describe()\n\n # names are different\n result.index.name = 'stat_%d' % len(group)\n\n result = result[:len(group)]\n # weirdo\n return result\n\n result = grouped.apply(desc)\n self.assertEqual(result.index.names, ('A', 'B', 'stat'))\n\n result2 = grouped.apply(desc2)\n self.assertEqual(result2.index.names, ('A', 'B', 'stat'))\n\n result3 = grouped.apply(desc3)\n self.assertEqual(result3.index.names, ('A', 'B', None))\n\n def test_nonsense_func(self):\n df = DataFrame([0])\n self.assertRaises(Exception, df.groupby, lambda x: x + 'foo')\n\n def test_builtins_apply(self): # GH8155\n df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),\n columns=['jim', 'joe'])\n df['jolie'] = np.random.randn(1000)\n print(df.head())\n\n for keys in ['jim', ['jim', 'joe']]: # single key & multi-key\n if keys == 'jim': continue\n for f in [max, min, sum]:\n fname = f.__name__\n result = df.groupby(keys).apply(f)\n _shape = result.shape\n ngroups = len(df.drop_duplicates(subset=keys))\n assert result.shape == (ngroups, 
3), 'invalid frame shape: '\\\n '{} (expected ({}, 3))'.format(result.shape, ngroups)\n\n assert_frame_equal(result, # numpy's equivalent function\n df.groupby(keys).apply(getattr(np, fname)))\n\n if f != sum:\n expected = df.groupby(keys).agg(fname).reset_index()\n expected.set_index(keys, inplace=True, drop=False)\n assert_frame_equal(result, expected, check_dtype=False)\n\n assert_series_equal(getattr(result, fname)(),\n getattr(df, fname)())\n\n def test_cythonized_aggers(self):\n data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],\n 'B': ['A', 'B'] * 6,\n 'C': np.random.randn(12)}\n df = DataFrame(data)\n df.loc[2:10:2,'C'] = nan\n\n def _testit(op):\n # single column\n grouped = df.drop(['B'], axis=1).groupby('A')\n exp = {}\n for cat, group in grouped:\n exp[cat] = op(group['C'])\n exp = DataFrame({'C': exp})\n exp.index.name = 'A'\n result = op(grouped)\n assert_frame_equal(result, exp)\n\n # multiple columns\n grouped = df.groupby(['A', 'B'])\n expd = {}\n for (cat1, cat2), group in grouped:\n expd.setdefault(cat1, {})[cat2] = op(group['C'])\n exp = DataFrame(expd).T.stack(dropna=False)\n result = op(grouped)['C']\n assert_series_equal(result, exp)\n\n _testit(lambda x: x.count())\n _testit(lambda x: x.sum())\n _testit(lambda x: x.std())\n _testit(lambda x: x.var())\n _testit(lambda x: x.sem())\n _testit(lambda x: x.mean())\n _testit(lambda x: x.median())\n _testit(lambda x: x.prod())\n _testit(lambda x: x.min())\n _testit(lambda x: x.max())\n\n def test_max_min_non_numeric(self):\n # #2700\n aa = DataFrame({'nn':[11,11,22,22],'ii':[1,2,3,4],'ss':4*['mama']})\n\n result = aa.groupby('nn').max()\n self.assertTrue('ss' in result)\n\n result = aa.groupby('nn').min()\n self.assertTrue('ss' in result)\n\n def test_cython_agg_boolean(self):\n frame = DataFrame({'a': np.random.randint(0, 5, 50),\n 'b': np.random.randint(0, 2, 50).astype('bool')})\n result = frame.groupby('a')['b'].mean()\n expected = frame.groupby('a')['b'].agg(np.mean)\n\n assert_series_equal(result, expected)\n\n def test_cython_agg_nothing_to_agg(self):\n frame = DataFrame({'a': np.random.randint(0, 5, 50),\n 'b': ['foo', 'bar'] * 25})\n self.assertRaises(DataError, frame.groupby('a')['b'].mean)\n\n frame = DataFrame({'a': np.random.randint(0, 5, 50),\n 'b': ['foo', 'bar'] * 25})\n self.assertRaises(DataError, frame[['b']].groupby(frame['a']).mean)\n\n def test_cython_agg_nothing_to_agg_with_dates(self):\n frame = DataFrame({'a': np.random.randint(0, 5, 50),\n 'b': ['foo', 'bar'] * 25,\n 'dates': pd.date_range('now', periods=50,\n freq='T')})\n with tm.assertRaisesRegexp(DataError, \"No numeric types to aggregate\"):\n frame.groupby('b').dates.mean()\n\n def test_groupby_timedelta_cython_count(self):\n df = DataFrame({'g': list('ab' * 2),\n 'delt': np.arange(4).astype('timedelta64[ns]')})\n expected = Series([2, 2], index=['a', 'b'], name='delt')\n result = df.groupby('g').delt.count()\n tm.assert_series_equal(expected, result)\n\n def test_cython_agg_frame_columns(self):\n # #2113\n df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})\n\n result = df.groupby(level=0, axis='columns').mean()\n result = df.groupby(level=0, axis='columns').mean()\n result = df.groupby(level=0, axis='columns').mean()\n _ = df.groupby(level=0, axis='columns').mean()\n\n def test_wrap_aggregated_output_multindex(self):\n df = self.mframe.T\n df['baz', 'two'] = 'peekaboo'\n\n keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]\n agged = df.groupby(keys).agg(np.mean)\n tm.assert_isinstance(agged.columns, MultiIndex)\n\n def aggfun(ser):\n if 
ser.name == ('foo', 'one'):\n raise TypeError\n else:\n return ser.sum()\n agged2 = df.groupby(keys).aggregate(aggfun)\n self.assertEqual(len(agged2.columns) + 1, len(df.columns))\n\n def test_groupby_level(self):\n frame = self.mframe\n deleveled = frame.reset_index()\n\n result0 = frame.groupby(level=0).sum()\n result1 = frame.groupby(level=1).sum()\n\n expected0 = frame.groupby(deleveled['first'].values).sum()\n expected1 = frame.groupby(deleveled['second'].values).sum()\n\n expected0 = expected0.reindex(frame.index.levels[0])\n expected1 = expected1.reindex(frame.index.levels[1])\n\n self.assertEqual(result0.index.name, 'first')\n self.assertEqual(result1.index.name, 'second')\n\n assert_frame_equal(result0, expected0)\n assert_frame_equal(result1, expected1)\n self.assertEqual(result0.index.name, frame.index.names[0])\n self.assertEqual(result1.index.name, frame.index.names[1])\n\n # groupby level name\n result0 = frame.groupby(level='first').sum()\n result1 = frame.groupby(level='second').sum()\n assert_frame_equal(result0, expected0)\n assert_frame_equal(result1, expected1)\n\n # axis=1\n\n result0 = frame.T.groupby(level=0, axis=1).sum()\n result1 = frame.T.groupby(level=1, axis=1).sum()\n assert_frame_equal(result0, expected0.T)\n assert_frame_equal(result1, expected1.T)\n\n # raise exception for non-MultiIndex\n self.assertRaises(ValueError, self.df.groupby, level=1)\n\n\n\n\n def test_groupby_level_index_names(self):\n ## GH4014 this used to raise ValueError since 'exp'>1 (in py2)\n df = DataFrame({'exp' : ['A']*3 + ['B']*3, 'var1' : lrange(6),}).set_index('exp')\n df.groupby(level='exp')\n self.assertRaises(ValueError, df.groupby, level='foo')\n\n def test_groupby_level_with_nas(self):\n index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],\n labels=[[1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 2, 3, 0, 1, 2, 3]])\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.), index=index)\n result = s.groupby(level=0).sum()\n expected = Series([22., 6.], index=[1, 0])\n assert_series_equal(result, expected)\n\n index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],\n labels=[[1, 1, 1, 1, -1, 0, 0, 0],\n [0, 1, 2, 3, 0, 1, 2, 3]])\n\n # factorizing doesn't confuse things\n s = Series(np.arange(8.), index=index)\n result = s.groupby(level=0).sum()\n expected = Series([18., 6.], index=[1, 0])\n assert_series_equal(result, expected)\n\n def test_groupby_level_apply(self):\n frame = self.mframe\n\n result = frame.groupby(level=0).count()\n self.assertEqual(result.index.name, 'first')\n result = frame.groupby(level=1).count()\n self.assertEqual(result.index.name, 'second')\n\n result = frame['A'].groupby(level=0).count()\n self.assertEqual(result.index.name, 'first')\n\n def test_groupby_args(self):\n #PR8618 and issue 8015\n frame = self.mframe\n def j():\n frame.groupby()\n self.assertRaisesRegexp(TypeError, \"You have to supply one of 'by' and 'level'\", j)\n\n def k():\n frame.groupby(by=None, level=None)\n self.assertRaisesRegexp(TypeError, \"You have to supply one of 'by' and 'level'\", k)\n\n def test_groupby_level_mapper(self):\n frame = self.mframe\n deleveled = frame.reset_index()\n\n mapper0 = {'foo': 0, 'bar': 0,\n 'baz': 1, 'qux': 1}\n mapper1 = {'one': 0, 'two': 0, 'three': 1}\n\n result0 = frame.groupby(mapper0, level=0).sum()\n result1 = frame.groupby(mapper1, level=1).sum()\n\n mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])\n mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])\n expected0 = frame.groupby(mapped_level0).sum()\n expected1 
= frame.groupby(mapped_level1).sum()\n expected0.index.name, expected1.index.name = 'first', 'second'\n\n assert_frame_equal(result0, expected0)\n assert_frame_equal(result1, expected1)\n\n def test_groupby_level_0_nonmulti(self):\n # #1313\n a = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1,\n 4, 5, 2, 6], name='foo'))\n\n result = a.groupby(level=0).sum()\n self.assertEqual(result.index.name, a.index.name)\n\n def test_level_preserve_order(self):\n grouped = self.mframe.groupby(level=0)\n exp_labels = np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3])\n assert_almost_equal(grouped.grouper.labels[0], exp_labels)\n\n def test_grouping_labels(self):\n grouped = self.mframe.groupby(self.mframe.index.get_level_values(0))\n exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3])\n assert_almost_equal(grouped.grouper.labels[0], exp_labels)\n\n def test_cython_fail_agg(self):\n dr = bdate_range('1/1/2000', periods=50)\n ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr)\n\n grouped = ts.groupby(lambda x: x.month)\n summed = grouped.sum()\n expected = grouped.agg(np.sum)\n assert_series_equal(summed, expected)\n\n def test_apply_series_to_frame(self):\n def f(piece):\n return DataFrame({'value': piece,\n 'demeaned': piece - piece.mean(),\n 'logged': np.log(piece)})\n\n dr = bdate_range('1/1/2000', periods=100)\n ts = Series(np.random.randn(100), index=dr)\n\n grouped = ts.groupby(lambda x: x.month)\n result = grouped.apply(f)\n\n tm.assert_isinstance(result, DataFrame)\n self.assertTrue(result.index.equals(ts.index))\n\n def test_apply_series_yield_constant(self):\n result = self.df.groupby(['A', 'B'])['C'].apply(len)\n self.assertEqual(result.index.names[:2], ('A', 'B'))\n\n def test_apply_frame_to_series(self):\n grouped = self.df.groupby(['A', 'B'])\n result = grouped.apply(len)\n expected = grouped.count()['C']\n self.assertTrue(result.index.equals(expected.index))\n self.assert_numpy_array_equal(result.values, expected.values)\n\n def test_apply_frame_concat_series(self):\n def trans(group):\n return group.groupby('B')['C'].sum().order()[:2]\n\n def trans2(group):\n grouped = group.groupby(df.reindex(group.index)['B'])\n return grouped.sum().order()[:2]\n\n df = DataFrame({'A': np.random.randint(0, 5, 1000),\n 'B': np.random.randint(0, 5, 1000),\n 'C': np.random.randn(1000)})\n\n result = df.groupby('A').apply(trans)\n exp = df.groupby('A')['C'].apply(trans2)\n assert_series_equal(result, exp)\n\n def test_apply_transform(self):\n grouped = self.ts.groupby(lambda x: x.month)\n result = grouped.apply(lambda x: x * 2)\n expected = grouped.transform(lambda x: x * 2)\n assert_series_equal(result, expected)\n\n def test_apply_multikey_corner(self):\n grouped = self.tsframe.groupby([lambda x: x.year,\n lambda x: x.month])\n\n def f(group):\n return group.sort('A')[-5:]\n\n result = grouped.apply(f)\n for key, group in grouped:\n assert_frame_equal(result.ix[key], f(group))\n\n def test_mutate_groups(self):\n\n # GH3380\n\n mydf = DataFrame({\n 'cat1' : ['a'] * 8 + ['b'] * 6,\n 'cat2' : ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 + ['d'] * 2 + ['e'] * 2,\n 'cat3' : lmap(lambda x: 'g%s' % x, lrange(1,15)),\n 'val' : np.random.randint(100, size=14),\n })\n\n def f_copy(x):\n x = x.copy()\n x['rank'] = x.val.rank(method='min')\n return x.groupby('cat2')['rank'].min()\n\n def f_no_copy(x):\n x['rank'] = x.val.rank(method='min')\n return x.groupby('cat2')['rank'].min()\n\n grpby_copy = mydf.groupby('cat1').apply(f_copy)\n grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy)\n 
assert_series_equal(grpby_copy,grpby_no_copy)\n\n def test_no_mutate_but_looks_like(self):\n\n # GH 8467\n # first show's mutation indicator\n # second does not, but should yield the same results\n df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n 'value': range(9)})\n\n result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)\n result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)\n assert_series_equal(result1, result2)\n\n def test_apply_chunk_view(self):\n # Low level tinkering could be unsafe, make sure not\n df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],\n 'value': lrange(9)})\n\n # return view\n f = lambda x: x[:2]\n\n result = df.groupby('key', group_keys=False).apply(f)\n expected = df.take([0, 1, 3, 4, 6, 7])\n assert_frame_equal(result, expected)\n\n def test_apply_no_name_column_conflict(self):\n df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],\n 'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],\n 'value': lrange(10)[::-1]})\n\n # it works! #2605\n grouped = df.groupby(['name', 'name2'])\n grouped.apply(lambda x: x.sort('value'))\n\n def test_groupby_series_indexed_differently(self):\n s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],\n index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))\n s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],\n index=Index(['a', 'b', 'd', 'f', 'g', 'h']))\n\n grouped = s1.groupby(s2)\n agged = grouped.mean()\n exp = s1.groupby(s2.reindex(s1.index).get).mean()\n assert_series_equal(agged, exp)\n\n def test_groupby_with_hier_columns(self):\n tuples = list(zip(*[['bar', 'bar', 'baz', 'baz',\n 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two',\n 'one', 'two', 'one', 'two']]))\n index = MultiIndex.from_tuples(tuples)\n columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'),\n ('B', 'cat'), ('A', 'dog')])\n df = DataFrame(np.random.randn(8, 4), index=index,\n columns=columns)\n\n result = df.groupby(level=0).mean()\n self.assertTrue(result.columns.equals(columns))\n\n result = df.groupby(level=0, axis=1).mean()\n self.assertTrue(result.index.equals(df.index))\n\n result = df.groupby(level=0).agg(np.mean)\n self.assertTrue(result.columns.equals(columns))\n\n result = df.groupby(level=0).apply(lambda x: x.mean())\n self.assertTrue(result.columns.equals(columns))\n\n result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))\n self.assertTrue(result.columns.equals(Index(['A', 'B'])))\n self.assertTrue(result.index.equals(df.index))\n\n # add a nuisance column\n sorted_columns, _ = columns.sortlevel(0)\n df['A', 'foo'] = 'bar'\n result = df.groupby(level=0).mean()\n self.assertTrue(result.columns.equals(df.columns[:-1]))\n\n def test_pass_args_kwargs(self):\n from numpy import percentile\n\n def f(x, q=None, axis=0):\n return percentile(x, q, axis=axis)\n g = lambda x: percentile(x, 80, axis=0)\n\n # Series\n ts_grouped = self.ts.groupby(lambda x: x.month)\n agg_result = ts_grouped.agg(percentile, 80, axis=0)\n apply_result = ts_grouped.apply(percentile, 80, axis=0)\n trans_result = ts_grouped.transform(percentile, 80, axis=0)\n\n agg_expected = ts_grouped.quantile(.8)\n trans_expected = ts_grouped.transform(g)\n\n assert_series_equal(apply_result, agg_expected)\n assert_series_equal(agg_result, agg_expected)\n assert_series_equal(trans_result, trans_expected)\n\n agg_result = ts_grouped.agg(f, q=80)\n apply_result = ts_grouped.apply(f, q=80)\n trans_result = ts_grouped.transform(f, q=80)\n assert_series_equal(agg_result, agg_expected)\n assert_series_equal(apply_result, agg_expected)\n 
assert_series_equal(trans_result, trans_expected)\n\n # DataFrame\n df_grouped = self.tsframe.groupby(lambda x: x.month)\n agg_result = df_grouped.agg(percentile, 80, axis=0)\n apply_result = df_grouped.apply(DataFrame.quantile, .8)\n expected = df_grouped.quantile(.8)\n assert_frame_equal(apply_result, expected)\n assert_frame_equal(agg_result, expected)\n\n agg_result = df_grouped.agg(f, q=80)\n apply_result = df_grouped.apply(DataFrame.quantile, q=.8)\n assert_frame_equal(agg_result, expected)\n assert_frame_equal(apply_result, expected)\n\n # def test_cython_na_bug(self):\n # values = np.random.randn(10)\n # shape = (5, 5)\n # label_list = [np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2], dtype=np.int32),\n # np.array([1, 2, 3, 4, 0, 1, 2, 3, 3, 4], dtype=np.int32)]\n\n # lib.group_aggregate(values, label_list, shape)\n\n def test_size(self):\n grouped = self.df.groupby(['A', 'B'])\n result = grouped.size()\n for key, group in grouped:\n self.assertEqual(result[key], len(group))\n\n grouped = self.df.groupby('A')\n result = grouped.size()\n for key, group in grouped:\n self.assertEqual(result[key], len(group))\n\n grouped = self.df.groupby('B')\n result = grouped.size()\n for key, group in grouped:\n self.assertEqual(result[key], len(group))\n\n def test_count(self):\n\n # GH5610\n # count counts non-nulls\n df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]],\n columns=['A', 'B', 'C'])\n\n count_as = df.groupby('A').count()\n count_not_as = df.groupby('A', as_index=False).count()\n\n expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'], index=[1,3])\n expected.index.name='A'\n assert_frame_equal(count_not_as, expected.reset_index())\n assert_frame_equal(count_as, expected)\n\n count_B = df.groupby('A')['B'].count()\n assert_series_equal(count_B, expected['B'])\n\n def test_count_object(self):\n df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3,\n 'c': [2] * 3 + [3] * 3})\n result = df.groupby('c').a.count()\n expected = pd.Series([3, 3], index=[2, 3], name='a')\n tm.assert_series_equal(result, expected)\n\n df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,\n 'c': [2] * 3 + [3] * 3})\n result = df.groupby('c').a.count()\n expected = pd.Series([1, 3], index=[2, 3], name='a')\n tm.assert_series_equal(result, expected)\n\n def test_count_cross_type(self): # GH8169\n vals = np.hstack((np.random.randint(0,5,(100,2)),\n np.random.randint(0,2,(100,2))))\n\n df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])\n df[df==2] = np.nan\n expected = df.groupby(['c', 'd']).count()\n\n for t in ['float32', 'object']:\n df['a'] = df['a'].astype(t)\n df['b'] = df['b'].astype(t)\n result = df.groupby(['c', 'd']).count()\n tm.assert_frame_equal(result, expected)\n\n def test_non_cython_api(self):\n\n # GH5610\n # non-cython calls should not include the grouper\n\n df = DataFrame([[1, 2, 'foo'], [1, nan, 'bar',], [3, nan, 'baz']], columns=['A', 'B','C'])\n g = df.groupby('A')\n gni = df.groupby('A',as_index=False)\n\n # mad\n expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3])\n expected.index.name = 'A'\n result = g.mad()\n assert_frame_equal(result,expected)\n\n expected = DataFrame([[0.,0.],[0,nan]],columns=['A','B'],index=[0,1])\n result = gni.mad()\n assert_frame_equal(result,expected)\n\n # describe\n expected = DataFrame(dict(B = concat([df.loc[[0,1],'B'].describe(),df.loc[[2],'B'].describe()],keys=[1,3])))\n expected.index.names = ['A',None]\n result = g.describe()\n assert_frame_equal(result,expected)\n\n expected = 
concat([df.loc[[0,1],['A','B']].describe(),df.loc[[2],['A','B']].describe()],keys=[0,1])\n result = gni.describe()\n assert_frame_equal(result,expected)\n\n # any\n expected = DataFrame([[True, True],[False, True]],columns=['B','C'],index=[1,3])\n expected.index.name = 'A'\n result = g.any()\n assert_frame_equal(result,expected)\n\n # idxmax\n expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3])\n expected.index.name = 'A'\n result = g.idxmax()\n assert_frame_equal(result,expected)\n\n def test_cython_api2(self):\n\n # this takes the fast apply path\n\n # cumsum (GH5614)\n df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C'])\n expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])\n result = df.groupby('A').cumsum()\n assert_frame_equal(result,expected)\n\n expected = DataFrame([[1, 2, np.nan], [2, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C']).astype('float64')\n result = df.groupby('A', as_index=False).cumsum()\n assert_frame_equal(result,expected)\n\n def test_grouping_ndarray(self):\n grouped = self.df.groupby(self.df['A'].values)\n\n result = grouped.sum()\n expected = self.df.groupby('A').sum()\n assert_frame_equal(result, expected, check_names=False) # Note: no names when grouping by value\n\n def test_agg_consistency(self):\n # agg with ([]) and () not consistent\n # GH 6715\n\n def P1(a):\n try:\n return np.percentile(a.dropna(), q=1)\n except:\n return np.nan\n\n import datetime as dt\n df = DataFrame({'col1':[1,2,3,4],\n 'col2':[10,25,26,31],\n 'date':[dt.date(2013,2,10),dt.date(2013,2,10),dt.date(2013,2,11),dt.date(2013,2,11)]})\n\n g = df.groupby('date')\n\n expected = g.agg([P1])\n expected.columns = expected.columns.levels[0]\n\n result = g.agg(P1)\n assert_frame_equal(result, expected)\n\n def test_apply_typecast_fail(self):\n df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],\n 'c': np.tile(['a', 'b', 'c'], 2),\n 'v': np.arange(1., 7.)})\n\n def f(group):\n v = group['v']\n group['v2'] = (v - v.min()) / (v.max() - v.min())\n return group\n\n result = df.groupby('d').apply(f)\n\n expected = df.copy()\n expected['v2'] = np.tile([0., 0.5, 1], 2)\n\n assert_frame_equal(result, expected)\n\n def test_apply_multiindex_fail(self):\n index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],\n [1, 2, 3, 1, 2, 3]])\n df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],\n 'c': np.tile(['a', 'b', 'c'], 2),\n 'v': np.arange(1., 7.)}, index=index)\n\n def f(group):\n v = group['v']\n group['v2'] = (v - v.min()) / (v.max() - v.min())\n return group\n\n result = df.groupby('d').apply(f)\n\n expected = df.copy()\n expected['v2'] = np.tile([0., 0.5, 1], 2)\n\n assert_frame_equal(result, expected)\n\n def test_apply_corner(self):\n result = self.tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)\n expected = self.tsframe * 2\n assert_frame_equal(result, expected)\n\n def test_apply_without_copy(self):\n # GH 5545\n # returning a non-copy in an applied function fails\n\n data = DataFrame({'id_field' : [100, 100, 200, 300], 'category' : ['a','b','c','c'], 'value' : [1,2,3,4]})\n\n def filt1(x):\n if x.shape[0] == 1:\n return x.copy()\n else:\n return x[x.category == 'c']\n\n def filt2(x):\n if x.shape[0] == 1:\n return x\n else:\n return x[x.category == 'c']\n\n expected = data.groupby('id_field').apply(filt1)\n result = data.groupby('id_field').apply(filt2)\n assert_frame_equal(result,expected)\n\n def test_apply_use_categorical_name(self):\n from pandas import qcut\n cats = qcut(self.df.C, 4)\n\n def get_stats(group):\n return {'min': 
group.min(), 'max': group.max(),\n 'count': group.count(), 'mean': group.mean()}\n\n result = self.df.groupby(cats).D.apply(get_stats)\n self.assertEqual(result.index.names[0], 'C')\n\n def test_apply_corner_cases(self):\n # #535, can't use sliding iterator\n\n N = 1000\n labels = np.random.randint(0, 100, size=N)\n df = DataFrame({'key': labels,\n 'value1': np.random.randn(N),\n 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})\n\n grouped = df.groupby('key')\n\n def f(g):\n g['value3'] = g['value1'] * 2\n return g\n\n result = grouped.apply(f)\n self.assertTrue('value3' in result)\n\n def test_transform_mixed_type(self):\n index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],\n [1, 2, 3, 1, 2, 3]])\n df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],\n 'c': np.tile(['a', 'b', 'c'], 2),\n 'v': np.arange(1., 7.)}, index=index)\n\n def f(group):\n group['g'] = group['d'] * 2\n return group[:1]\n\n grouped = df.groupby('c')\n result = grouped.apply(f)\n\n self.assertEqual(result['d'].dtype, np.float64)\n\n # this is by definition a mutating operation!\n with option_context('mode.chained_assignment',None):\n for key, group in grouped:\n res = f(group)\n assert_frame_equal(res, result.ix[key])\n\n def test_groupby_wrong_multi_labels(self):\n from pandas import read_csv\n data = \"\"\"index,foo,bar,baz,spam,data\n0,foo1,bar1,baz1,spam2,20\n1,foo1,bar2,baz1,spam3,30\n2,foo2,bar2,baz1,spam2,40\n3,foo1,bar1,baz2,spam1,50\n4,foo3,bar1,baz2,spam1,60\"\"\"\n data = read_csv(StringIO(data), index_col=0)\n\n grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n assert_frame_equal(result, expected)\n\n def test_groupby_series_with_name(self):\n result = self.df.groupby(self.df['A']).mean()\n result2 = self.df.groupby(self.df['A'], as_index=False).mean()\n self.assertEqual(result.index.name, 'A')\n self.assertIn('A', result2)\n\n result = self.df.groupby([self.df['A'], self.df['B']]).mean()\n result2 = self.df.groupby([self.df['A'], self.df['B']],\n as_index=False).mean()\n self.assertEqual(result.index.names, ('A', 'B'))\n self.assertIn('A', result2)\n self.assertIn('B', result2)\n\n def test_seriesgroupby_name_attr(self):\n # GH 6265\n result = self.df.groupby('A')['C']\n self.assertEqual(result.count().name, 'C')\n self.assertEqual(result.mean().name, 'C')\n\n testFunc = lambda x: np.sum(x)*2\n self.assertEqual(result.agg(testFunc).name, 'C')\n\n def test_groupby_name_propagation(self):\n # GH 6124\n def summarize(df, name=None):\n return Series({\n 'count': 1,\n 'mean': 2,\n 'omissions': 3,\n }, name=name)\n\n def summarize_random_name(df):\n # Provide a different name for each Series. 
In this case, groupby\n # should not attempt to propagate the Series name since they are\n # inconsistent.\n return Series({\n 'count': 1,\n 'mean': 2,\n 'omissions': 3,\n }, name=df.iloc[0]['A'])\n\n metrics = self.df.groupby('A').apply(summarize)\n self.assertEqual(metrics.columns.name, None)\n metrics = self.df.groupby('A').apply(summarize, 'metrics')\n self.assertEqual(metrics.columns.name, 'metrics')\n metrics = self.df.groupby('A').apply(summarize_random_name)\n self.assertEqual(metrics.columns.name, None)\n\n def test_groupby_nonstring_columns(self):\n df = DataFrame([np.arange(10) for x in range(10)])\n grouped = df.groupby(0)\n result = grouped.mean()\n expected = df.groupby(df[0]).mean()\n assert_frame_equal(result, expected)\n\n def test_cython_grouper_series_bug_noncontig(self):\n arr = np.empty((100, 100))\n arr.fill(np.nan)\n obj = Series(arr[:, 0], index=lrange(100))\n inds = np.tile(lrange(10), 10)\n\n result = obj.groupby(inds).agg(Series.median)\n self.assertTrue(result.isnull().all())\n\n def test_series_grouper_noncontig_index(self):\n index = Index(tm.rands_array(10, 100))\n\n values = Series(np.random.randn(50), index=index[::2])\n labels = np.random.randint(0, 5, 50)\n\n # it works!\n grouped = values.groupby(labels)\n\n # accessing the index elements causes segfault\n f = lambda x: len(set(map(id, x.index)))\n grouped.agg(f)\n\n def test_convert_objects_leave_decimal_alone(self):\n\n from decimal import Decimal\n\n s = Series(lrange(5))\n labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')\n\n def convert_fast(x):\n return Decimal(str(x.mean()))\n\n def convert_force_pure(x):\n # base will be length 0\n assert(len(x.base) > 0)\n return Decimal(str(x.mean()))\n\n grouped = s.groupby(labels)\n\n result = grouped.agg(convert_fast)\n self.assertEqual(result.dtype, np.object_)\n tm.assert_isinstance(result[0], Decimal)\n\n result = grouped.agg(convert_force_pure)\n self.assertEqual(result.dtype, np.object_)\n tm.assert_isinstance(result[0], Decimal)\n\n def test_fast_apply(self):\n # make sure that fast apply is correctly called\n # rather than raising any kind of error\n # otherwise the python path will be callsed\n # which slows things down\n N = 1000\n labels = np.random.randint(0, 2000, size=N)\n labels2 = np.random.randint(0, 3, size=N)\n df = DataFrame({'key': labels,\n 'key2': labels2,\n 'value1': np.random.randn(N),\n 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})\n def f(g):\n return 1\n\n g = df.groupby(['key', 'key2'])\n\n grouper = g.grouper\n\n splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)\n group_keys = grouper._get_group_keys()\n\n values, mutated = splitter.fast_apply(f, group_keys)\n self.assertFalse(mutated)\n\n def test_apply_with_mixed_dtype(self):\n # GH3480, apply with mixed dtype on axis=1 breaks in 0.11\n df = DataFrame({'foo1' : ['one', 'two', 'two', 'three', 'one', 'two'],\n 'foo2' : np.random.randn(6)})\n result = df.apply(lambda x: x, axis=1)\n assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())\n\n\n # GH 3610 incorrect dtype conversion with as_index=False\n df = DataFrame({\"c1\" : [1,2,6,6,8]})\n df[\"c2\"] = df.c1/2.0\n result1 = df.groupby(\"c2\").mean().reset_index().c2\n result2 = df.groupby(\"c2\", as_index=False).mean().c2\n assert_series_equal(result1,result2)\n\n def test_groupby_aggregation_mixed_dtype(self):\n\n # GH 6212\n expected = DataFrame({\n 'v1': [5,5,7,np.nan,3,3,4,1],\n 'v2': [55,55,77,np.nan,33,33,44,11]},\n 
index=MultiIndex.from_tuples([(1,95),(1,99),(2,95),(2,99),('big','damp'),\n ('blue','dry'),('red','red'),('red','wet')],\n names=['by1','by2']))\n\n df = DataFrame({\n 'v1': [1,3,5,7,8,3,5,np.nan,4,5,7,9],\n 'v2': [11,33,55,77,88,33,55,np.nan,44,55,77,99],\n 'by1': [\"red\", \"blue\", 1, 2, np.nan, \"big\", 1, 2, \"red\", 1, np.nan, 12],\n 'by2': [\"wet\", \"dry\", 99, 95, np.nan, \"damp\", 95, 99, \"red\", 99, np.nan,\n np.nan]\n })\n\n g = df.groupby(['by1','by2'])\n result = g[['v1','v2']].mean()\n assert_frame_equal(result,expected)\n\n def test_groupby_dtype_inference_empty(self):\n # GH 6733\n df = DataFrame({'x': [], 'range': np.arange(0,dtype='int64')})\n result = df.groupby('x').first()\n expected = DataFrame({'range' : Series([],index=Index([],name='x'),dtype='int64') })\n assert_frame_equal(result,expected,by_blocks=True)\n\n def test_groupby_list_infer_array_like(self):\n result = self.df.groupby(list(self.df['A'])).mean()\n expected = self.df.groupby(self.df['A']).mean()\n assert_frame_equal(result, expected, check_names=False)\n\n self.assertRaises(Exception, self.df.groupby, list(self.df['A'][:-1]))\n\n # pathological case of ambiguity\n df = DataFrame({'foo': [0, 1], 'bar': [3, 4],\n 'val': np.random.randn(2)})\n\n result = df.groupby(['foo', 'bar']).mean()\n expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]\n\n def test_dictify(self):\n dict(iter(self.df.groupby('A')))\n dict(iter(self.df.groupby(['A', 'B'])))\n dict(iter(self.df['C'].groupby(self.df['A'])))\n dict(iter(self.df['C'].groupby([self.df['A'], self.df['B']])))\n dict(iter(self.df.groupby('A')['C']))\n dict(iter(self.df.groupby(['A', 'B'])['C']))\n\n def test_sparse_friendly(self):\n sdf = self.df[['C', 'D']].to_sparse()\n panel = tm.makePanel()\n tm.add_nans(panel)\n\n def _check_work(gp):\n gp.mean()\n gp.agg(np.mean)\n dict(iter(gp))\n\n # it works!\n _check_work(sdf.groupby(lambda x: x // 2))\n _check_work(sdf['C'].groupby(lambda x: x // 2))\n _check_work(sdf.groupby(self.df['A']))\n\n # do this someday\n # _check_work(panel.groupby(lambda x: x.month, axis=1))\n\n def test_panel_groupby(self):\n self.panel = tm.makePanel()\n tm.add_nans(self.panel)\n grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},\n axis='items')\n agged = grouped.mean()\n agged2 = grouped.agg(lambda x: x.mean('items'))\n\n tm.assert_panel_equal(agged, agged2)\n\n self.assert_numpy_array_equal(agged.items, [0, 1])\n\n grouped = self.panel.groupby(lambda x: x.month, axis='major')\n agged = grouped.mean()\n\n self.assert_numpy_array_equal(agged.major_axis, sorted(list(set(self.panel.major_axis.month))))\n\n grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},\n axis='minor')\n agged = grouped.mean()\n self.assert_numpy_array_equal(agged.minor_axis, [0, 1])\n\n def test_numpy_groupby(self):\n from pandas.core.groupby import numpy_groupby\n\n data = np.random.randn(100, 100)\n labels = np.random.randint(0, 10, size=100)\n\n df = DataFrame(data)\n\n result = df.groupby(labels).sum().values\n expected = numpy_groupby(data, labels)\n assert_almost_equal(result, expected)\n\n result = df.groupby(labels, axis=1).sum().values\n expected = numpy_groupby(data, labels, axis=1)\n assert_almost_equal(result, expected)\n\n def test_groupby_2d_malformed(self):\n d = DataFrame(index=lrange(2))\n d['group'] = ['g1', 'g2']\n d['zeros'] = [0, 0]\n d['ones'] = [1, 1]\n d['label'] = ['l1', 'l2']\n tmp = d.groupby(['group']).mean()\n res_values = np.array([[0., 1.], [0., 1.]])\n self.assert_numpy_array_equal(tmp.columns, 
['zeros', 'ones'])\n self.assert_numpy_array_equal(tmp.values, res_values)\n\n def test_int32_overflow(self):\n B = np.concatenate((np.arange(10000), np.arange(10000),\n np.arange(5000)))\n A = np.arange(25000)\n df = DataFrame({'A': A, 'B': B,\n 'C': A, 'D': B,\n 'E': np.random.randn(25000)})\n\n left = df.groupby(['A', 'B', 'C', 'D']).sum()\n right = df.groupby(['D', 'C', 'B', 'A']).sum()\n self.assertEqual(len(left), len(right))\n\n def test_int64_overflow(self):\n B = np.concatenate((np.arange(1000), np.arange(1000),\n np.arange(500)))\n A = np.arange(2500)\n df = DataFrame({'A': A, 'B': B,\n 'C': A, 'D': B,\n 'E': A, 'F': B,\n 'G': A, 'H': B,\n 'values': np.random.randn(2500)})\n\n lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])\n rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])\n\n left = lg.sum()['values']\n right = rg.sum()['values']\n\n exp_index, _ = left.index.sortlevel(0)\n self.assertTrue(left.index.equals(exp_index))\n\n exp_index, _ = right.index.sortlevel(0)\n self.assertTrue(right.index.equals(exp_index))\n\n tups = list(map(tuple, df[['A', 'B', 'C', 'D',\n 'E', 'F', 'G', 'H']].values))\n tups = com._asarray_tuplesafe(tups)\n expected = df.groupby(tups).sum()['values']\n\n for k, v in compat.iteritems(expected):\n self.assertEqual(left[k], right[k[::-1]])\n self.assertEqual(left[k], v)\n self.assertEqual(len(left), len(right))\n\n def test_groupby_sort_multi(self):\n df = DataFrame({'a': ['foo', 'bar', 'baz'],\n 'b': [3, 2, 1],\n 'c': [0, 1, 2],\n 'd': np.random.randn(3)})\n\n tups = lmap(tuple, df[['a', 'b', 'c']].values)\n tups = com._asarray_tuplesafe(tups)\n result = df.groupby(['a', 'b', 'c'], sort=True).sum()\n self.assert_numpy_array_equal(result.index.values,\n tups[[1, 2, 0]])\n\n tups = lmap(tuple, df[['c', 'a', 'b']].values)\n tups = com._asarray_tuplesafe(tups)\n result = df.groupby(['c', 'a', 'b'], sort=True).sum()\n self.assert_numpy_array_equal(result.index.values, tups)\n\n tups = lmap(tuple, df[['b', 'c', 'a']].values)\n tups = com._asarray_tuplesafe(tups)\n result = df.groupby(['b', 'c', 'a'], sort=True).sum()\n self.assert_numpy_array_equal(result.index.values,\n tups[[2, 1, 0]])\n\n df = DataFrame({'a': [0, 1, 2, 0, 1, 2],\n 'b': [0, 0, 0, 1, 1, 1],\n 'd': np.random.randn(6)})\n grouped = df.groupby(['a', 'b'])['d']\n result = grouped.sum()\n _check_groupby(df, result, ['a', 'b'], 'd')\n\n def test_intercept_builtin_sum(self):\n s = Series([1., 2., np.nan, 3.])\n grouped = s.groupby([0, 1, 2, 2])\n\n result = grouped.agg(builtins.sum)\n result2 = grouped.apply(builtins.sum)\n expected = grouped.sum()\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n def test_column_select_via_attr(self):\n result = self.df.groupby('A').C.sum()\n expected = self.df.groupby('A')['C'].sum()\n assert_series_equal(result, expected)\n\n self.df['mean'] = 1.5\n result = self.df.groupby('A').mean()\n expected = self.df.groupby('A').agg(np.mean)\n assert_frame_equal(result, expected)\n\n def test_rank_apply(self):\n lev1 = tm.rands_array(10, 100)\n lev2 = tm.rands_array(10, 130)\n lab1 = np.random.randint(0, 100, size=500)\n lab2 = np.random.randint(0, 130, size=500)\n\n df = DataFrame({'value': np.random.randn(500),\n 'key1': lev1.take(lab1),\n 'key2': lev2.take(lab2)})\n\n result = df.groupby(['key1', 'key2']).value.rank()\n\n expected = []\n for key, piece in df.groupby(['key1', 'key2']):\n expected.append(piece.value.rank())\n expected = concat(expected, axis=0)\n expected = expected.reindex(result.index)\n 
assert_series_equal(result, expected)\n\n result = df.groupby(['key1', 'key2']).value.rank(pct=True)\n\n expected = []\n for key, piece in df.groupby(['key1', 'key2']):\n expected.append(piece.value.rank(pct=True))\n expected = concat(expected, axis=0)\n expected = expected.reindex(result.index)\n assert_series_equal(result, expected)\n\n def test_dont_clobber_name_column(self):\n df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],\n 'name': ['foo', 'bar', 'baz'] * 2})\n\n result = df.groupby('key').apply(lambda x: x)\n assert_frame_equal(result, df)\n\n def test_skip_group_keys(self):\n from pandas import concat\n\n tsf = tm.makeTimeDataFrame()\n\n grouped = tsf.groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_index(by='A')[:3])\n\n pieces = []\n for key, group in grouped:\n pieces.append(group.sort_index(by='A')[:3])\n\n expected = concat(pieces)\n assert_frame_equal(result, expected)\n\n grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.order()[:3])\n\n pieces = []\n for key, group in grouped:\n pieces.append(group.order()[:3])\n\n expected = concat(pieces)\n assert_series_equal(result, expected)\n\n def test_no_nonsense_name(self):\n # GH #995\n s = self.frame['C'].copy()\n s.name = None\n\n result = s.groupby(self.frame['A']).agg(np.sum)\n self.assertIsNone(result.name)\n\n def test_wrap_agg_out(self):\n grouped = self.three_group.groupby(['A', 'B'])\n\n def func(ser):\n if ser.dtype == np.object:\n raise TypeError\n else:\n return ser.sum()\n result = grouped.aggregate(func)\n exp_grouped = self.three_group.ix[:, self.three_group.columns != 'C']\n expected = exp_grouped.groupby(['A', 'B']).aggregate(func)\n assert_frame_equal(result, expected)\n\n def test_multifunc_sum_bug(self):\n # GH #1065\n x = DataFrame(np.arange(9).reshape(3, 3))\n x['test'] = 0\n x['fl'] = [1.3, 1.5, 1.6]\n\n grouped = x.groupby('test')\n result = grouped.agg({'fl': 'sum', 2: 'size'})\n self.assertEqual(result['fl'].dtype, np.float64)\n\n def test_handle_dict_return_value(self):\n def f(group):\n return {'min': group.min(), 'max': group.max()}\n\n def g(group):\n return Series({'min': group.min(), 'max': group.max()})\n\n result = self.df.groupby('A')['C'].apply(f)\n expected = self.df.groupby('A')['C'].apply(g)\n\n tm.assert_isinstance(result, Series)\n assert_series_equal(result, expected)\n\n def test_getitem_list_of_columns(self):\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8),\n 'E': np.random.randn(8)})\n\n result = df.groupby('A')[['C', 'D']].mean()\n result2 = df.groupby('A')['C', 'D'].mean()\n result3 = df.groupby('A')[df.columns[2:4]].mean()\n\n expected = df.ix[:, ['A', 'C', 'D']].groupby('A').mean()\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n assert_frame_equal(result3, expected)\n\n def test_agg_multiple_functions_maintain_order(self):\n # GH #610\n funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]\n result = self.df.groupby('A')['C'].agg(funcs)\n exp_cols = ['mean', 'max', 'min']\n\n self.assert_numpy_array_equal(result.columns, exp_cols)\n\n def test_multiple_functions_tuples_and_non_tuples(self):\n # #1359\n\n funcs = [('foo', 'mean'), 'std']\n ex_funcs = [('foo', 'mean'), ('std', 'std')]\n\n result = self.df.groupby('A')['C'].agg(funcs)\n expected = self.df.groupby('A')['C'].agg(ex_funcs)\n 
assert_frame_equal(result, expected)\n\n result = self.df.groupby('A').agg(funcs)\n expected = self.df.groupby('A').agg(ex_funcs)\n assert_frame_equal(result, expected)\n\n def test_agg_multiple_functions_too_many_lambdas(self):\n grouped = self.df.groupby('A')\n funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]\n\n self.assertRaises(SpecificationError, grouped.agg, funcs)\n\n def test_more_flexible_frame_multi_function(self):\n from pandas import concat\n\n grouped = self.df.groupby('A')\n\n exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))\n exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))\n\n expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)\n expected = expected.swaplevel(0, 1, axis=1).sortlevel(0, axis=1)\n\n d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])\n result = grouped.aggregate(d)\n\n assert_frame_equal(result, expected)\n\n # be careful\n result = grouped.aggregate(OrderedDict([['C', np.mean],\n ['D', [np.mean, np.std]]]))\n expected = grouped.aggregate(OrderedDict([['C', np.mean],\n ['D', [np.mean, np.std]]]))\n assert_frame_equal(result, expected)\n\n def foo(x):\n return np.mean(x)\n\n def bar(x):\n return np.std(x, ddof=1)\n d = OrderedDict([['C', np.mean],\n ['D', OrderedDict([['foo', np.mean],\n ['bar', np.std]])]])\n result = grouped.aggregate(d)\n\n d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])\n expected = grouped.aggregate(d)\n\n assert_frame_equal(result, expected)\n\n def test_multi_function_flexible_mix(self):\n # GH #1268\n grouped = self.df.groupby('A')\n\n d = OrderedDict([['C', OrderedDict([['foo', 'mean'],\n [\n 'bar', 'std']])],\n ['D', 'sum']])\n result = grouped.aggregate(d)\n d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'],\n [\n 'bar', 'std']])],\n ['D', ['sum']]])\n result2 = grouped.aggregate(d2)\n\n d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'],\n [\n 'bar', 'std']])],\n ['D', {'sum': 'sum'}]])\n expected = grouped.aggregate(d3)\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n def test_agg_callables(self):\n # GH 7929\n df = DataFrame({'foo' : [1,2], 'bar' :[3,4]}).astype(np.int64)\n\n class fn_class(object):\n def __call__(self, x):\n return sum(x)\n\n equiv_callables = [sum, np.sum,\n lambda x: sum(x),\n lambda x: x.sum(),\n partial(sum), fn_class()]\n\n expected = df.groupby(\"foo\").agg(sum)\n for ecall in equiv_callables:\n result = df.groupby('foo').agg(ecall)\n assert_frame_equal(result, expected)\n\n def test_set_group_name(self):\n def f(group):\n assert group.name is not None\n return group\n\n def freduce(group):\n assert group.name is not None\n return group.sum()\n\n def foo(x):\n return freduce(x)\n\n def _check_all(grouped):\n # make sure all these work\n grouped.apply(f)\n grouped.aggregate(freduce)\n grouped.aggregate({'C': freduce, 'D': freduce})\n grouped.transform(f)\n\n grouped['C'].apply(f)\n grouped['C'].aggregate(freduce)\n grouped['C'].aggregate([freduce, foo])\n grouped['C'].transform(f)\n\n _check_all(self.df.groupby('A'))\n _check_all(self.df.groupby(['A', 'B']))\n\n def test_no_dummy_key_names(self):\n # GH #1291\n\n result = self.df.groupby(self.df['A'].values).sum()\n self.assertIsNone(result.index.name)\n\n result = self.df.groupby([self.df['A'].values,\n self.df['B'].values]).sum()\n self.assertEqual(result.index.names, (None, None))\n\n def test_groupby_categorical(self):\n levels = ['foo', 'bar', 'baz', 'qux']\n codes = np.random.randint(0, 4, size=100)\n\n cats = 
Categorical.from_codes(codes, levels, name='myfactor')\n\n data = DataFrame(np.random.randn(100, 4))\n\n result = data.groupby(cats).mean()\n\n expected = data.groupby(np.asarray(cats)).mean()\n expected = expected.reindex(levels)\n expected.index.name = 'myfactor'\n\n assert_frame_equal(result, expected)\n self.assertEqual(result.index.name, cats.name)\n\n grouped = data.groupby(cats)\n desc_result = grouped.describe()\n\n idx = cats.codes.argsort()\n ord_labels = np.asarray(cats).take(idx)\n ord_data = data.take(idx)\n expected = ord_data.groupby(ord_labels, sort=False).describe()\n expected.index.names = ['myfactor', None]\n assert_frame_equal(desc_result, expected)\n\n def test_groupby_groups_datetimeindex(self):\n # #1430\n from pandas.tseries.api import DatetimeIndex\n periods = 1000\n ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)\n df = DataFrame({'high': np.arange(periods),\n 'low': np.arange(periods)}, index=ind)\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n\n # it works!\n groups = grouped.groups\n tm.assert_isinstance(list(groups.keys())[0], datetime)\n\n def test_groupby_groups_datetimeindex_tz(self):\n # GH 3950\n dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00',\n '2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']\n df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],\n 'datetime': dates,\n 'value1': np.arange(6,dtype='int64'),\n 'value2': [1, 2] * 3})\n df['datetime'] = df['datetime'].apply(lambda d: Timestamp(d, tz='US/Pacific'))\n\n exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 07:00:00',\n '2011-07-19 08:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00', '2011-07-19 09:00:00'],\n tz='US/Pacific', name='datetime')\n exp_idx2 = Index(['a', 'b'] * 3, name='label')\n exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])\n expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5], 'value2': [1, 2, 2, 1, 1, 2]},\n index=exp_idx, columns=['value1', 'value2'])\n\n result = df.groupby(['datetime', 'label']).sum()\n assert_frame_equal(result, expected)\n\n # by level\n didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')\n df = DataFrame({'value1': np.arange(6,dtype='int64'),\n 'value2': [1, 2, 3, 1, 2, 3]},\n index=didx)\n\n exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00'], tz='Asia/Tokyo')\n expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},\n index=exp_idx, columns=['value1', 'value2'])\n\n result = df.groupby(level=0).sum()\n assert_frame_equal(result, expected)\n\n def test_groupby_reindex_inside_function(self):\n from pandas.tseries.api import DatetimeIndex\n\n periods = 1000\n ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)\n df = DataFrame({'high': np.arange(\n periods), 'low': np.arange(periods)}, index=ind)\n\n def agg_before(hour, func, fix=False):\n \"\"\"\n Run an aggregate func on the subset of data.\n \"\"\"\n def _func(data):\n d = data.select(lambda x: x.hour < 11).dropna()\n if fix:\n data[data.index[0]]\n if len(d) == 0:\n return None\n return func(d)\n return _func\n\n def afunc(data):\n d = data.select(lambda x: x.hour < 11).dropna()\n return np.max(d)\n\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n closure_bad = grouped.agg({'high': agg_before(11, np.max)})\n closure_good = grouped.agg({'high': agg_before(11, np.max, True)})\n\n assert_frame_equal(closure_bad, closure_good)\n\n def test_multiindex_columns_empty_level(self):\n l = [['count', 
'values'], ['to filter', '']]\n midx = MultiIndex.from_tuples(l)\n\n df = DataFrame([[long(1), 'A']], columns=midx)\n\n grouped = df.groupby('to filter').groups\n self.assert_numpy_array_equal(grouped['A'], [0])\n\n grouped = df.groupby([('to filter', '')]).groups\n self.assert_numpy_array_equal(grouped['A'], [0])\n\n df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx)\n\n expected = df.groupby('to filter').groups\n result = df.groupby([('to filter', '')]).groups\n self.assertEqual(result, expected)\n\n df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx)\n\n expected = df.groupby('to filter').groups\n result = df.groupby([('to filter', '')]).groups\n self.assertEqual(result, expected)\n\n def test_cython_median(self):\n df = DataFrame(np.random.randn(1000))\n df.values[::2] = np.nan\n\n labels = np.random.randint(0, 50, size=1000).astype(float)\n labels[::17] = np.nan\n\n result = df.groupby(labels).median()\n exp = df.groupby(labels).agg(nanops.nanmedian)\n assert_frame_equal(result, exp)\n\n df = DataFrame(np.random.randn(1000, 5))\n rs = df.groupby(labels).agg(np.median)\n xp = df.groupby(labels).median()\n assert_frame_equal(rs, xp)\n\n def test_groupby_categorical_no_compress(self):\n data = Series(np.random.randn(9))\n\n codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n cats = Categorical.from_codes(codes, [0, 1, 2])\n\n result = data.groupby(cats).mean()\n exp = data.groupby(codes).mean()\n assert_series_equal(result, exp)\n\n codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])\n cats = Categorical.from_codes(codes, [0, 1, 2, 3])\n\n result = data.groupby(cats).mean()\n exp = data.groupby(codes).mean().reindex(cats.categories)\n assert_series_equal(result, exp)\n\n cats = Categorical([\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n categories=[\"a\",\"b\",\"c\",\"d\"])\n data = DataFrame({\"a\":[1,1,1,2,2,2,3,4,5], \"b\":cats})\n\n result = data.groupby(\"b\").mean()\n result = result[\"a\"].values\n exp = np.array([1,2,4,np.nan])\n self.assert_numpy_array_equivalent(result, exp)\n\n def test_groupby_first_datetime64(self):\n df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])\n df[1] = df[1].view('M8[ns]')\n\n self.assertTrue(issubclass(df[1].dtype.type, np.datetime64))\n\n result = df.groupby(level=0).first()\n got_dt = result[1].dtype\n self.assertTrue(issubclass(got_dt.type, np.datetime64))\n\n result = df[1].groupby(level=0).first()\n got_dt = result.dtype\n self.assertTrue(issubclass(got_dt.type, np.datetime64))\n\n def test_groupby_max_datetime64(self):\n # GH 5869\n # datetimelike dtype conversion from int\n df = DataFrame(dict(A = Timestamp('20130101'), B = np.arange(5)))\n expected = df.groupby('A')['A'].apply(lambda x: x.max())\n result = df.groupby('A')['A'].max()\n assert_series_equal(result,expected)\n\n def test_groupby_datetime64_32_bit(self):\n # GH 6410 / numpy 4328\n # 32-bit under 1.9-dev indexing issue\n\n df = DataFrame({\"A\": range(2), \"B\": [pd.Timestamp('2000-01-1')]*2})\n result = df.groupby(\"A\")[\"B\"].transform(min)\n expected = Series([pd.Timestamp('2000-01-1')]*2)\n assert_series_equal(result,expected)\n\n def test_groupby_categorical_unequal_len(self):\n import pandas as pd\n #GH3011\n series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n # The raises only happens with categorical, not with series of types category\n bins = pd.cut(series.dropna().values, 4)\n\n # len(bins) != len(series) here\n self.assertRaises(ValueError,lambda : series.groupby(bins).mean())\n\n def 
test_gb_apply_list_of_unequal_len_arrays(self):\n\n # GH1738\n df = DataFrame({'group1': ['a','a','a','b','b','b','a','a','a','b','b','b'],\n 'group2': ['c','c','d','d','d','e','c','c','d','d','d','e'],\n 'weight': [1.1,2,3,4,5,6,2,4,6,8,1,2],\n 'value': [7.1,8,9,10,11,12,8,7,6,5,4,3]\n })\n df = df.set_index(['group1', 'group2'])\n df_grouped = df.groupby(level=['group1','group2'], sort=True)\n\n def noddy(value, weight):\n out = np.array( value * weight ).repeat(3)\n return out\n\n # the kernel function returns arrays of unequal length\n # pandas sniffs the first one, sees it's an array and not\n # a list, and assumed the rest are of equal length\n # and so tries a vstack\n\n # don't die\n no_toes = df_grouped.apply(lambda x: noddy(x.value, x.weight ))\n\n def test_groupby_with_empty(self):\n import pandas as pd\n index = pd.DatetimeIndex(())\n data = ()\n series = pd.Series(data, index)\n grouper = pd.tseries.resample.TimeGrouper('D')\n grouped = series.groupby(grouper)\n assert next(iter(grouped), None) is None\n\n def test_groupby_with_timegrouper(self):\n # GH 4161\n # TimeGrouper requires a sorted index\n # also verifies that the resultant index has the correct name\n import datetime as DT\n df_original = DataFrame({\n 'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),\n 'Quantity': [18,3,5,1,9,3],\n 'Date' : [\n DT.datetime(2013,9,1,13,0),\n DT.datetime(2013,9,1,13,5),\n DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,3,10,0),\n DT.datetime(2013,12,2,12,0),\n DT.datetime(2013,9,2,14,0),\n ]})\n\n # GH 6908 change target column's order\n df_reordered = df_original.sort(columns='Quantity')\n\n for df in [df_original, df_reordered]:\n df = df.set_index(['Date'])\n\n expected = DataFrame({ 'Quantity' : np.nan },\n index=date_range('20130901 13:00:00','20131205 13:00:00',\n freq='5D',name='Date',closed='left'))\n expected.iloc[[0,6,18],0] = np.array([24.,6.,9.],dtype='float64')\n\n result1 = df.resample('5D',how=sum)\n assert_frame_equal(result1, expected)\n\n df_sorted = df.sort_index()\n result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum()\n assert_frame_equal(result2, expected)\n\n result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum()\n assert_frame_equal(result3, expected)\n\n def test_groupby_with_timegrouper_methods(self):\n # GH 3881\n # make sure API of timegrouper conforms\n\n import datetime as DT\n df_original = pd.DataFrame({\n 'Branch' : 'A A A A A B'.split(),\n 'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),\n 'Quantity': [1,3,5,8,9,3],\n 'Date' : [\n DT.datetime(2013,1,1,13,0),\n DT.datetime(2013,1,1,13,5),\n DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,2,10,0),\n DT.datetime(2013,12,2,12,0),\n DT.datetime(2013,12,2,14,0),\n ]})\n\n df_sorted = df_original.sort(columns='Quantity', ascending=False)\n\n for df in [df_original, df_sorted]:\n df = df.set_index('Date', drop=False)\n g = df.groupby(pd.TimeGrouper('6M'))\n self.assertTrue(g.group_keys)\n self.assertTrue(isinstance(g.grouper,pd.core.groupby.BinGrouper))\n groups = g.groups\n self.assertTrue(isinstance(groups,dict))\n self.assertTrue(len(groups) == 3)\n\n def test_timegrouper_with_reg_groups(self):\n\n # GH 3794\n # allow combinateion of timegrouper/reg groups\n\n import datetime as DT\n\n df_original = DataFrame({\n 'Branch' : 'A A A A A A A B'.split(),\n 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),\n 'Quantity': [1,3,5,1,8,1,9,3],\n 'Date' : [\n DT.datetime(2013,1,1,13,0),\n DT.datetime(2013,1,1,13,5),\n DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,2,10,0),\n 
DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,2,10,0),\n DT.datetime(2013,12,2,12,0),\n DT.datetime(2013,12,2,14,0),\n ]}).set_index('Date')\n\n df_sorted = df_original.sort(columns='Quantity', ascending=False)\n\n for df in [df_original, df_sorted]:\n expected = DataFrame({\n 'Buyer': 'Carl Joe Mark'.split(),\n 'Quantity': [10,18,3],\n 'Date' : [\n DT.datetime(2013,12,31,0,0),\n DT.datetime(2013,12,31,0,0),\n DT.datetime(2013,12,31,0,0),\n ]}).set_index(['Date','Buyer'])\n\n result = df.groupby([pd.Grouper(freq='A'),'Buyer']).sum()\n assert_frame_equal(result,expected)\n\n expected = DataFrame({\n 'Buyer': 'Carl Mark Carl Joe'.split(),\n 'Quantity': [1,3,9,18],\n 'Date' : [\n DT.datetime(2013,1,1,0,0),\n DT.datetime(2013,1,1,0,0),\n DT.datetime(2013,7,1,0,0),\n DT.datetime(2013,7,1,0,0),\n ]}).set_index(['Date','Buyer'])\n result = df.groupby([pd.Grouper(freq='6MS'),'Buyer']).sum()\n assert_frame_equal(result,expected)\n\n df_original = DataFrame({\n 'Branch' : 'A A A A A A A B'.split(),\n 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),\n 'Quantity': [1,3,5,1,8,1,9,3],\n 'Date' : [\n DT.datetime(2013,10,1,13,0),\n DT.datetime(2013,10,1,13,5),\n DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,2,10,0),\n DT.datetime(2013,10,1,20,0),\n DT.datetime(2013,10,2,10,0),\n DT.datetime(2013,10,2,12,0),\n DT.datetime(2013,10,2,14,0),\n ]}).set_index('Date')\n\n df_sorted = df_original.sort(columns='Quantity', ascending=False)\n for df in [df_original, df_sorted]:\n\n expected = DataFrame({\n 'Buyer': 'Carl Joe Mark Carl Joe'.split(),\n 'Quantity': [6,8,3,4,10],\n 'Date' : [\n DT.datetime(2013,10,1,0,0),\n DT.datetime(2013,10,1,0,0),\n DT.datetime(2013,10,1,0,0),\n DT.datetime(2013,10,2,0,0),\n DT.datetime(2013,10,2,0,0),\n ]}).set_index(['Date','Buyer'])\n\n result = df.groupby([pd.Grouper(freq='1D'),'Buyer']).sum()\n assert_frame_equal(result,expected)\n\n result = df.groupby([pd.Grouper(freq='1M'),'Buyer']).sum()\n expected = DataFrame({\n 'Buyer': 'Carl Joe Mark'.split(),\n 'Quantity': [10,18,3],\n 'Date' : [\n DT.datetime(2013,10,31,0,0),\n DT.datetime(2013,10,31,0,0),\n DT.datetime(2013,10,31,0,0),\n ]}).set_index(['Date','Buyer'])\n assert_frame_equal(result,expected)\n\n # passing the name\n df = df.reset_index()\n result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum()\n assert_frame_equal(result,expected)\n\n self.assertRaises(KeyError, lambda : df.groupby([pd.Grouper(freq='1M',key='foo'),'Buyer']).sum())\n\n # passing the level\n df = df.set_index('Date')\n result = df.groupby([pd.Grouper(freq='1M',level='Date'),'Buyer']).sum()\n assert_frame_equal(result,expected)\n result = df.groupby([pd.Grouper(freq='1M',level=0),'Buyer']).sum()\n assert_frame_equal(result,expected)\n\n self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',level='foo'),'Buyer']).sum())\n\n # multi names\n df = df.copy()\n df['Date'] = df.index + pd.offsets.MonthEnd(2)\n result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum()\n expected = DataFrame({\n 'Buyer': 'Carl Joe Mark'.split(),\n 'Quantity': [10,18,3],\n 'Date' : [\n DT.datetime(2013,11,30,0,0),\n DT.datetime(2013,11,30,0,0),\n DT.datetime(2013,11,30,0,0),\n ]}).set_index(['Date','Buyer'])\n assert_frame_equal(result,expected)\n\n # error as we have both a level and a name!\n self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',key='Date',level='Date'),'Buyer']).sum())\n\n\n # single groupers\n expected = DataFrame({ 'Quantity' : [31],\n 'Date' : [DT.datetime(2013,10,31,0,0)] 
}).set_index('Date')\n result = df.groupby(pd.Grouper(freq='1M')).sum()\n assert_frame_equal(result, expected)\n\n result = df.groupby([pd.Grouper(freq='1M')]).sum()\n assert_frame_equal(result, expected)\n\n expected = DataFrame({ 'Quantity' : [31],\n 'Date' : [DT.datetime(2013,11,30,0,0)] }).set_index('Date')\n result = df.groupby(pd.Grouper(freq='1M',key='Date')).sum()\n assert_frame_equal(result, expected)\n\n result = df.groupby([pd.Grouper(freq='1M',key='Date')]).sum()\n assert_frame_equal(result, expected)\n\n # GH 6764 multiple grouping with/without sort\n df = DataFrame({\n 'date' : pd.to_datetime([\n '20121002','20121007','20130130','20130202','20130305','20121002',\n '20121207','20130130','20130202','20130305','20130202','20130305']),\n 'user_id' : [1,1,1,1,1,3,3,3,5,5,5,5],\n 'whole_cost' : [1790,364,280,259,201,623,90,312,359,301,359,801],\n 'cost1' : [12,15,10,24,39,1,0,90,45,34,1,12] }).set_index('date')\n\n for freq in ['D', 'M', 'A', 'Q-APR']:\n expected = df.groupby('user_id')['whole_cost'].resample(\n freq, how='sum').dropna().reorder_levels(\n ['date','user_id']).sortlevel().astype('int64')\n expected.name = 'whole_cost'\n\n result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum()\n assert_series_equal(result1, expected)\n\n result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum()\n assert_series_equal(result2, expected)\n\n def test_timegrouper_get_group(self):\n # GH 6914\n\n df_original = DataFrame({\n 'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),\n 'Quantity': [18,3,5,1,9,3],\n 'Date' : [datetime(2013,9,1,13,0), datetime(2013,9,1,13,5),\n datetime(2013,10,1,20,0), datetime(2013,10,3,10,0),\n datetime(2013,12,2,12,0), datetime(2013,9,2,14,0),]})\n df_reordered = df_original.sort(columns='Quantity')\n\n # single grouping\n expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],\n df_original.iloc[[4]]]\n dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']\n\n for df in [df_original, df_reordered]:\n grouped = df.groupby(pd.Grouper(freq='M', key='Date'))\n for t, expected in zip(dt_list, expected_list):\n dt = pd.Timestamp(t)\n result = grouped.get_group(dt)\n assert_frame_equal(result, expected)\n\n # multiple grouping\n expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],\n df_original.iloc[[4]]]\n g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'), ('Joe', '2013-12-31')]\n\n for df in [df_original, df_reordered]:\n grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])\n for (b, t), expected in zip(g_list, expected_list):\n dt = pd.Timestamp(t)\n result = grouped.get_group((b, dt))\n assert_frame_equal(result, expected)\n\n # with index\n df_original = df_original.set_index('Date')\n df_reordered = df_original.sort(columns='Quantity')\n\n expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],\n df_original.iloc[[4]]]\n\n for df in [df_original, df_reordered]:\n grouped = df.groupby(pd.Grouper(freq='M'))\n for t, expected in zip(dt_list, expected_list):\n dt = pd.Timestamp(t)\n result = grouped.get_group(dt)\n assert_frame_equal(result, expected)\n\n def test_cumcount(self):\n df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])\n g = df.groupby('A')\n sg = g.A\n\n expected = Series([0, 1, 2, 0, 3])\n\n assert_series_equal(expected, g.cumcount())\n assert_series_equal(expected, sg.cumcount())\n\n def test_cumcount_empty(self):\n ge = DataFrame().groupby(level=0)\n se = Series().groupby(level=0)\n\n e = Series(dtype='int64') # edge 
case, as this is usually considered float\n\n assert_series_equal(e, ge.cumcount())\n assert_series_equal(e, se.cumcount())\n\n def test_cumcount_dupe_index(self):\n df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=[0] * 5)\n g = df.groupby('A')\n sg = g.A\n\n expected = Series([0, 1, 2, 0, 3], index=[0] * 5)\n\n assert_series_equal(expected, g.cumcount())\n assert_series_equal(expected, sg.cumcount())\n\n def test_cumcount_mi(self):\n mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])\n df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=mi)\n g = df.groupby('A')\n sg = g.A\n\n expected = Series([0, 1, 2, 0, 3], index=mi)\n\n assert_series_equal(expected, g.cumcount())\n assert_series_equal(expected, sg.cumcount())\n\n def test_cumcount_groupby_not_col(self):\n df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=[0] * 5)\n g = df.groupby([0, 0, 0, 1, 0])\n sg = g.A\n\n expected = Series([0, 1, 2, 0, 3], index=[0] * 5)\n\n assert_series_equal(expected, g.cumcount())\n assert_series_equal(expected, sg.cumcount())\n\n def test_filter_series(self):\n import pandas as pd\n s = pd.Series([1, 3, 20, 5, 22, 24, 7])\n expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])\n expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])\n grouper = s.apply(lambda x: x % 2)\n grouped = s.groupby(grouper)\n assert_series_equal(\n grouped.filter(lambda x: x.mean() < 10), expected_odd)\n assert_series_equal(\n grouped.filter(lambda x: x.mean() > 10), expected_even)\n # Test dropna=False.\n assert_series_equal(\n grouped.filter(lambda x: x.mean() < 10, dropna=False),\n expected_odd.reindex(s.index))\n assert_series_equal(\n grouped.filter(lambda x: x.mean() > 10, dropna=False),\n expected_even.reindex(s.index))\n\n def test_filter_single_column_df(self):\n import pandas as pd\n df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])\n expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])\n expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])\n grouper = df[0].apply(lambda x: x % 2)\n grouped = df.groupby(grouper)\n assert_frame_equal(\n grouped.filter(lambda x: x.mean() < 10), expected_odd)\n assert_frame_equal(\n grouped.filter(lambda x: x.mean() > 10), expected_even)\n # Test dropna=False.\n assert_frame_equal(\n grouped.filter(lambda x: x.mean() < 10, dropna=False),\n expected_odd.reindex(df.index))\n assert_frame_equal(\n grouped.filter(lambda x: x.mean() > 10, dropna=False),\n expected_even.reindex(df.index))\n\n def test_filter_multi_column_df(self):\n import pandas as pd\n df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})\n grouper = df['A'].apply(lambda x: x % 2)\n grouped = df.groupby(grouper)\n expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])\n assert_frame_equal(\n grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10), expected)\n\n def test_filter_mixed_df(self):\n import pandas as pd\n df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})\n grouper = df['A'].apply(lambda x: x % 2)\n grouped = df.groupby(grouper)\n expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']},\n index=[1, 2])\n assert_frame_equal(\n grouped.filter(lambda x: x['A'].sum() > 10), expected)\n\n def test_filter_out_all_groups(self):\n import pandas as pd\n s = pd.Series([1, 3, 20, 5, 22, 24, 7])\n grouper = s.apply(lambda x: x % 2)\n grouped = s.groupby(grouper)\n assert_series_equal(\n grouped.filter(lambda x: x.mean() > 1000), s[[]])\n df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c 
d'.split()})\n grouper = df['A'].apply(lambda x: x % 2)\n grouped = df.groupby(grouper)\n assert_frame_equal(\n grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])\n\n def test_filter_out_no_groups(self):\n import pandas as pd\n s = pd.Series([1, 3, 20, 5, 22, 24, 7])\n grouper = s.apply(lambda x: x % 2)\n grouped = s.groupby(grouper)\n filtered = grouped.filter(lambda x: x.mean() > 0)\n assert_series_equal(filtered, s)\n df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})\n grouper = df['A'].apply(lambda x: x % 2)\n grouped = df.groupby(grouper)\n filtered = grouped.filter(lambda x: x['A'].mean() > 0)\n assert_frame_equal(filtered, df)\n\n def test_filter_condition_raises(self):\n import pandas as pd\n def raise_if_sum_is_zero(x):\n if x.sum() == 0:\n raise ValueError\n else:\n return x.sum() > 0\n s = pd.Series([-1,0,1,2])\n grouper = s.apply(lambda x: x % 2)\n grouped = s.groupby(grouper)\n self.assertRaises(TypeError,\n lambda: grouped.filter(raise_if_sum_is_zero))\n\n def test_filter_bad_shapes(self):\n df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})\n s = df['B']\n g_df = df.groupby('B')\n g_s = s.groupby(s)\n\n f = lambda x: x\n self.assertRaises(TypeError, lambda: g_df.filter(f))\n self.assertRaises(TypeError, lambda: g_s.filter(f))\n\n f = lambda x: x == 1\n self.assertRaises(TypeError, lambda: g_df.filter(f))\n self.assertRaises(TypeError, lambda: g_s.filter(f))\n\n f = lambda x: np.outer(x, x)\n self.assertRaises(TypeError, lambda: g_df.filter(f))\n self.assertRaises(TypeError, lambda: g_s.filter(f))\n\n def test_filter_nan_is_false(self):\n df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})\n s = df['B']\n g_df = df.groupby(df['B'])\n g_s = s.groupby(s)\n\n f = lambda x: np.nan\n assert_frame_equal(g_df.filter(f), df.loc[[]])\n assert_series_equal(g_s.filter(f), s[[]])\n\n def test_filter_against_workaround(self):\n np.random.seed(0)\n # Series of ints\n s = Series(np.random.randint(0,100,1000))\n grouper = s.apply(lambda x: np.round(x, -1))\n grouped = s.groupby(grouper)\n f = lambda x: x.mean() > 10\n old_way = s[grouped.transform(f).astype('bool')]\n new_way = grouped.filter(f)\n assert_series_equal(new_way.order(), old_way.order())\n\n # Series of floats\n s = 100*Series(np.random.random(1000))\n grouper = s.apply(lambda x: np.round(x, -1))\n grouped = s.groupby(grouper)\n f = lambda x: x.mean() > 10\n old_way = s[grouped.transform(f).astype('bool')]\n new_way = grouped.filter(f)\n assert_series_equal(new_way.order(), old_way.order())\n\n # Set up DataFrame of ints, floats, strings.\n from string import ascii_lowercase\n letters = np.array(list(ascii_lowercase))\n N = 1000\n random_letters = letters.take(np.random.randint(0, 26, N))\n df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),\n 'floats': N/10*Series(np.random.random(N)),\n 'letters': Series(random_letters)})\n\n # Group by ints; filter on floats.\n grouped = df.groupby('ints')\n old_way = df[grouped.floats.\\\n transform(lambda x: x.mean() > N/20).astype('bool')]\n new_way = grouped.filter(lambda x: x['floats'].mean() > N/20)\n assert_frame_equal(new_way, old_way)\n\n # Group by floats (rounded); filter on strings.\n grouper = df.floats.apply(lambda x: np.round(x, -1))\n grouped = df.groupby(grouper)\n old_way = df[grouped.letters.\\\n transform(lambda x: len(x) < N/10).astype('bool')]\n new_way = grouped.filter(\n lambda x: len(x.letters) < N/10)\n assert_frame_equal(new_way, old_way)\n\n # Group by strings; filter on ints.\n 
grouped = df.groupby('letters')\n old_way = df[grouped.ints.\\\n transform(lambda x: x.mean() > N/20).astype('bool')]\n new_way = grouped.filter(lambda x: x['ints'].mean() > N/20)\n assert_frame_equal(new_way, old_way)\n\n def test_filter_using_len(self):\n # BUG GH4447\n df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})\n grouped = df.groupby('B')\n actual = grouped.filter(lambda x: len(x) > 2)\n expected = DataFrame({'A': np.arange(2, 6), 'B': list('bbbb'), 'C': np.arange(2, 6)}, index=np.arange(2, 6))\n assert_frame_equal(actual, expected)\n\n actual = grouped.filter(lambda x: len(x) > 4)\n expected = df.ix[[]]\n assert_frame_equal(actual, expected)\n\n # Series have always worked properly, but we'll test anyway.\n s = df['B']\n grouped = s.groupby(s)\n actual = grouped.filter(lambda x: len(x) > 2)\n expected = Series(4*['b'], index=np.arange(2, 6))\n assert_series_equal(actual, expected)\n\n actual = grouped.filter(lambda x: len(x) > 4)\n expected = s[[]]\n assert_series_equal(actual, expected)\n\n def test_filter_maintains_ordering(self):\n # Simple case: index is sequential. #4621\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]})\n s = df['pid']\n grouped = df.groupby('tag')\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = df.iloc[[1, 2, 4, 7]]\n assert_frame_equal(actual, expected)\n\n grouped = s.groupby(df['tag'])\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = s.iloc[[1, 2, 4, 7]]\n assert_series_equal(actual, expected)\n\n # Now index is sequentially decreasing.\n df.index = np.arange(len(df) - 1, -1, -1)\n s = df['pid']\n grouped = df.groupby('tag')\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = df.iloc[[1, 2, 4, 7]]\n assert_frame_equal(actual, expected)\n\n grouped = s.groupby(df['tag'])\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = s.iloc[[1, 2, 4, 7]]\n assert_series_equal(actual, expected)\n\n # Index is shuffled.\n SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]\n df.index = df.index[SHUFFLED]\n s = df['pid']\n grouped = df.groupby('tag')\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = df.iloc[[1, 2, 4, 7]]\n assert_frame_equal(actual, expected)\n\n grouped = s.groupby(df['tag'])\n actual = grouped.filter(lambda x: len(x) > 1)\n expected = s.iloc[[1, 2, 4, 7]]\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_non_unique_int_index(self):\n # GH4620\n index = [1, 1, 1, 2, 1, 1, 0, 1]\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n 
assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_multiple_non_unique_int_index(self):\n # GH4620\n index = [1, 1, 1, 2, 0, 0, 0, 1]\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_non_unique_float_index(self):\n # GH4620\n index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_non_unique_float_index(self):\n # GH4620\n index = np.array([1, 1, 1, 2, 0, 0, 0, 1], dtype=float)\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n 
actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_non_unique_timestamp_index(self):\n # GH4620\n t0 = Timestamp('2013-09-30 00:05:00')\n t1 = Timestamp('2013-10-30 00:05:00')\n t2 = Timestamp('2013-11-30 00:05:00')\n index = [t1, t1, t1, t2, t1, t1, t0, t1]\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_and_transform_with_non_unique_string_index(self):\n # GH4620\n index = list('bbbcbbab')\n df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],\n 'tag' : [23,45,62,24,45,34,25,62]}, index=index)\n grouped_df = df.groupby('tag')\n ser = df['pid']\n grouped_ser = ser.groupby(df['tag'])\n expected_indexes = [1, 2, 4, 7]\n\n # Filter DataFrame\n actual = grouped_df.filter(lambda x: len(x) > 1)\n expected = df.iloc[expected_indexes]\n assert_frame_equal(actual, expected)\n\n actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)\n expected = df.copy()\n expected.iloc[[0, 3, 5, 6]] = np.nan\n assert_frame_equal(actual, expected)\n\n # Filter Series\n actual = grouped_ser.filter(lambda x: len(x) > 1)\n expected = ser.take(expected_indexes)\n assert_series_equal(actual, expected)\n\n actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)\n NA = np.nan\n expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')\n # ^ made manually because this can get confusing!\n assert_series_equal(actual, expected)\n\n # Transform Series\n actual = grouped_ser.transform(len)\n expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index)\n assert_series_equal(actual, expected)\n\n # Transform (a column from) DataFrameGroupBy\n actual = grouped_df.pid.transform(len)\n assert_series_equal(actual, expected)\n\n def test_filter_has_access_to_grouped_cols(self):\n df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])\n g = 
df.groupby('A')\n # previously didn't have access to col A #????\n filt = g.filter(lambda x: x['A'].sum() == 2)\n assert_frame_equal(filt, df.iloc[[0, 1]])\n\n def test_filter_enforces_scalarness(self):\n df = pd.DataFrame([\n ['best', 'a', 'x'],\n ['worst', 'b', 'y'],\n ['best', 'c', 'x'],\n ['best','d', 'y'],\n ['worst','d', 'y'],\n ['worst','d', 'y'],\n ['best','d', 'z'],\n ], columns=['a', 'b', 'c'])\n with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):\n df.groupby('c').filter(lambda g: g['a'] == 'best')\n\n def test_filter_non_bool_raises(self):\n df = pd.DataFrame([\n ['best', 'a', 1],\n ['worst', 'b', 1],\n ['best', 'c', 1],\n ['best','d', 1],\n ['worst','d', 1],\n ['worst','d', 1],\n ['best','d', 1],\n ], columns=['a', 'b', 'c'])\n with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):\n df.groupby('a').filter(lambda g: g.c.mean())\n\n def test_index_label_overlaps_location(self):\n # checking we don't have any label/location confusion in the\n # the wake of GH5375\n df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])\n g = df.groupby(list('ababb'))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list('ababb'))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n assert_series_equal(actual, expected)\n\n # ... and again, with a generic Index of floats\n df.index = df.index.astype(float)\n g = df.groupby(list('ababb'))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list('ababb'))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n assert_series_equal(actual, expected)\n\n def test_groupby_selection_with_methods(self):\n # some methods which require DatetimeIndex\n rng = pd.date_range('2014', periods=len(self.df))\n self.df.index = rng\n\n g = self.df.groupby(['A'])[['C']]\n g_exp = self.df[['C']].groupby(self.df['A'])\n # TODO check groupby with > 1 col ?\n\n # methods which are called as .foo()\n methods = ['count',\n 'corr',\n 'cummax', 'cummin', 'cumprod',\n 'describe', 'rank',\n 'quantile',\n 'diff', 'shift',\n 'all', 'any',\n 'idxmin', 'idxmax',\n 'ffill', 'bfill',\n 'pct_change',\n 'tshift',\n #'ohlc'\n ]\n\n for m in methods:\n res = getattr(g, m)()\n exp = getattr(g_exp, m)()\n assert_frame_equal(res, exp) # should always be frames!\n\n # methods which aren't just .foo()\n assert_frame_equal(g.fillna(0), g_exp.fillna(0))\n assert_frame_equal(g.dtypes, g_exp.dtypes)\n assert_frame_equal(g.apply(lambda x: x.sum()),\n g_exp.apply(lambda x: x.sum()))\n\n assert_frame_equal(g.resample('D'), g_exp.resample('D'))\n assert_frame_equal(g.resample('D', how='ohlc'),\n g_exp.resample('D', how='ohlc'))\n\n assert_frame_equal(g.filter(lambda x: len(x) == 3),\n g_exp.filter(lambda x: len(x) == 3))\n\n def test_groupby_whitelist(self):\n from string import ascii_lowercase\n letters = np.array(list(ascii_lowercase))\n N = 10\n random_letters = letters.take(np.random.randint(0, 26, N))\n df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),\n 'letters': Series(random_letters)})\n s = df.floats\n\n df_whitelist = frozenset([\n 'last', 'first',\n 'mean', 'sum', 'min', 'max',\n 'head', 'tail',\n 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',\n 'resample',\n 'describe',\n 'rank', 'quantile', 'count',\n 'fillna',\n 'mad',\n 'any', 'all',\n 'irow', 'take',\n 'idxmax', 'idxmin',\n 'shift', 'tshift',\n 
'ffill', 'bfill',\n 'pct_change', 'skew',\n 'plot', 'boxplot', 'hist',\n 'median', 'dtypes',\n 'corrwith', 'corr', 'cov',\n 'diff',\n ])\n s_whitelist = frozenset([\n 'last', 'first',\n 'mean', 'sum', 'min', 'max',\n 'head', 'tail',\n 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',\n 'resample',\n 'describe',\n 'rank', 'quantile', 'count',\n 'fillna',\n 'mad',\n 'any', 'all',\n 'irow', 'take',\n 'idxmax', 'idxmin',\n 'shift', 'tshift',\n 'ffill', 'bfill',\n 'pct_change', 'skew',\n 'plot', 'hist',\n 'median', 'dtype',\n 'corr', 'cov',\n 'value_counts',\n 'diff',\n 'unique', 'nunique',\n 'nlargest', 'nsmallest',\n ])\n\n for obj, whitelist in zip((df, s),\n (df_whitelist, s_whitelist)):\n gb = obj.groupby(df.letters)\n self.assertEqual(whitelist, gb._apply_whitelist)\n for m in whitelist:\n getattr(type(gb), m)\n\n AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',\n 'mad', 'std', 'var', 'sem']\n AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']\n\n def test_regression_whitelist_methods(self) :\n\n # GH6944\n # explicity test the whitelest methods\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n raw_frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=Index(['A', 'B', 'C'], name='exp'))\n raw_frame.ix[1, [1, 2]] = np.nan\n raw_frame.ix[7, [0, 1]] = np.nan\n\n for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,\n lrange(2), lrange(2),\n [True,False]) :\n\n if axis == 0 :\n frame = raw_frame\n else :\n frame = raw_frame.T\n\n if op in self.AGG_FUNCTIONS_WITH_SKIPNA :\n grouped = frame.groupby(level=level,axis=axis)\n result = getattr(grouped,op)(skipna=skipna)\n expected = getattr(frame,op)(level=level,axis=axis,skipna=skipna)\n assert_frame_equal(result, expected)\n else :\n grouped = frame.groupby(level=level,axis=axis)\n result = getattr(grouped,op)()\n expected = getattr(frame,op)(level=level,axis=axis)\n assert_frame_equal(result, expected)\n\n def test_regression_kwargs_whitelist_methods(self):\n # GH8733\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n raw_frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=Index(['A', 'B', 'C'], name='exp'))\n\n grouped = raw_frame.groupby(level=0, axis=1)\n grouped.all(test_kwargs='Test kwargs')\n grouped.any(test_kwargs='Test kwargs')\n grouped.cumcount(test_kwargs='Test kwargs')\n grouped.mad(test_kwargs='Test kwargs')\n grouped.cummin(test_kwargs='Test kwargs')\n grouped.skew(test_kwargs='Test kwargs')\n grouped.cumprod(test_kwargs='Test kwargs')\n\n def test_groupby_blacklist(self):\n from string import ascii_lowercase\n letters = np.array(list(ascii_lowercase))\n N = 10\n random_letters = letters.take(np.random.randint(0, 26, N))\n df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),\n 'letters': Series(random_letters)})\n s = df.floats\n\n blacklist = [\n 'eval', 'query', 'abs', 'where',\n 'mask', 'align', 'groupby', 'clip', 'astype',\n 'at', 'combine', 'consolidate', 'convert_objects',\n ]\n to_methods = [method for method in dir(df) if method.startswith('to_')]\n\n blacklist.extend(to_methods)\n\n # e.g., to_csv\n defined_but_not_allowed = (\"(?:^Cannot.+{0!r}.+{1!r}.+try using the \"\n \"'apply' method$)\")\n\n # e.g., query, eval\n not_defined = \"(?:^{1!r} object has no attribute 
{0!r}$)\"\n fmt = defined_but_not_allowed + '|' + not_defined\n for bl in blacklist:\n for obj in (df, s):\n gb = obj.groupby(df.letters)\n msg = fmt.format(bl, type(gb).__name__)\n with tm.assertRaisesRegexp(AttributeError, msg):\n getattr(gb, bl)\n\n def test_series_groupby_plotting_nominally_works(self):\n _skip_if_mpl_not_installed()\n\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender = tm.choice(['male', 'female'], size=n)\n\n weight.groupby(gender).plot()\n tm.close()\n height.groupby(gender).hist()\n tm.close()\n #Regression test for GH8733\n height.groupby(gender).plot(alpha=0.5)\n tm.close()\n\n def test_plotting_with_float_index_works(self):\n _skip_if_mpl_not_installed()\n\n # GH 7025\n df = DataFrame({'def': [1,1,1,2,2,2,3,3,3],\n 'val': np.random.randn(9)},\n index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0])\n\n df.groupby('def')['val'].plot()\n tm.close()\n df.groupby('def')['val'].apply(lambda x: x.plot())\n tm.close()\n\n @slow\n def test_frame_groupby_plot_boxplot(self):\n _skip_if_mpl_not_installed()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n mpl.use('Agg')\n tm.close()\n\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender = tm.choice(['male', 'female'], size=n)\n df = DataFrame({'height': height, 'weight': weight, 'gender': gender})\n gb = df.groupby('gender')\n\n res = gb.plot()\n self.assertEqual(len(plt.get_fignums()), 2)\n self.assertEqual(len(res), 2)\n tm.close()\n\n res = gb.boxplot()\n self.assertEqual(len(plt.get_fignums()), 1)\n self.assertEqual(len(res), 2)\n tm.close()\n\n # now works with GH 5610 as gender is excluded\n res = df.groupby('gender').hist()\n tm.close()\n\n @slow\n def test_frame_groupby_hist(self):\n _skip_if_mpl_not_installed()\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n mpl.use('Agg')\n tm.close()\n\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender_int = tm.choice([0, 1], size=n)\n df_int = DataFrame({'height': height, 'weight': weight,\n 'gender': gender_int})\n gb = df_int.groupby('gender')\n axes = gb.hist()\n self.assertEqual(len(axes), 2)\n self.assertEqual(len(plt.get_fignums()), 2)\n tm.close()\n\n def test_tab_completion(self):\n grp = self.mframe.groupby(level='second')\n results = set([v for v in dir(grp) if not v.startswith('_')])\n expected = set(['A','B','C',\n 'agg','aggregate','apply','boxplot','filter','first','get_group',\n 'groups','hist','indices','last','max','mean','median',\n 'min','name','ngroups','nth','ohlc','plot', 'prod',\n 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head',\n 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail',\n 'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',\n 'all', 'shift', 'skew', 'bfill', 'irow', 'ffill',\n 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',\n 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'\n ])\n self.assertEqual(results, expected)\n\n def test_lexsort_indexer(self):\n keys = [[nan]*5 + list(range(100)) + [nan]*5]\n # orders=True, na_position='last'\n result = _lexsort_indexer(keys, orders=True, na_position='last')\n expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # orders=True, na_position='first'\n result = _lexsort_indexer(keys, orders=True, 
na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))\n assert_equal(result, expected)\n\n # orders=False, na_position='last'\n result = _lexsort_indexer(keys, orders=False, na_position='last')\n expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # orders=False, na_position='first'\n result = _lexsort_indexer(keys, orders=False, na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))\n assert_equal(result, expected)\n\n def test_nargsort(self):\n # np.argsort(items) places NaNs last\n items = [nan]*5 + list(range(100)) + [nan]*5\n # np.argsort(items2) may not place NaNs first\n items2 = np.array(items, dtype='O')\n\n try:\n # GH 2785; due to a regression in NumPy1.6.2\n np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))\n np.argsort(items2, kind='mergesort')\n except TypeError as err:\n raise nose.SkipTest('requested sort not available for type')\n\n # mergesort is the most difficult to get right because we want it to be stable.\n\n # According to numpy/core/tests/test_multiarray, \"\"\"The number\n # of sorted items must be greater than ~50 to check the actual algorithm\n # because quick and merge sort fall over to insertion sort for small\n # arrays.\"\"\"\n\n\n # mergesort, ascending=True, na_position='last'\n result = _nargsort(\n items, kind='mergesort', ascending=True, na_position='last')\n expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # mergesort, ascending=True, na_position='first'\n result = _nargsort(\n items, kind='mergesort', ascending=True, na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))\n assert_equal(result, expected)\n\n # mergesort, ascending=False, na_position='last'\n result = _nargsort(\n items, kind='mergesort', ascending=False, na_position='last')\n expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # mergesort, ascending=False, na_position='first'\n result = _nargsort(\n items, kind='mergesort', ascending=False, na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))\n assert_equal(result, expected)\n\n # mergesort, ascending=True, na_position='last'\n result = _nargsort(\n items2, kind='mergesort', ascending=True, na_position='last')\n expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # mergesort, ascending=True, na_position='first'\n result = _nargsort(\n items2, kind='mergesort', ascending=True, na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))\n assert_equal(result, expected)\n\n # mergesort, ascending=False, na_position='last'\n result = _nargsort(\n items2, kind='mergesort', ascending=False, na_position='last')\n expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))\n assert_equal(result, expected)\n\n # mergesort, ascending=False, na_position='first'\n result = _nargsort(\n items2, kind='mergesort', ascending=False, na_position='first')\n expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))\n assert_equal(result, expected)\n\n def test_datetime_count(self):\n df = DataFrame({'a': [1,2,3] * 2,\n 'dates': pd.date_range('now', periods=6, freq='T')})\n result = df.groupby('a').dates.count()\n expected = Series([2, 2, 2], index=Index([1, 2, 3], name='a'),\n 
name='dates')\n tm.assert_series_equal(result, expected)\n\n def test_lower_int_prec_count(self):\n df = DataFrame({'a': np.array([0, 1, 2, 100], np.int8),\n 'b': np.array([1, 2, 3, 6], np.uint32),\n 'c': np.array([4, 5, 6, 8], np.int16),\n 'grp': list('ab' * 2)})\n result = df.groupby('grp').count()\n expected = DataFrame({'a': [2, 2],\n 'b': [2, 2],\n 'c': [2, 2]}, index=pd.Index(list('ab'),\n name='grp'))\n tm.assert_frame_equal(result, expected)\n\n def test_count_uses_size_on_exception(self):\n class RaisingObjectException(Exception):\n pass\n\n class RaisingObject(object):\n def __init__(self, msg='I will raise inside Cython'):\n super(RaisingObject, self).__init__()\n self.msg = msg\n\n def __eq__(self, other):\n # gets called in Cython to check that raising calls the method\n raise RaisingObjectException(self.msg)\n\n df = DataFrame({'a': [RaisingObject() for _ in range(4)],\n 'grp': list('ab' * 2)})\n result = df.groupby('grp').count()\n expected = DataFrame({'a': [2, 2]}, index=pd.Index(list('ab'),\n name='grp'))\n tm.assert_frame_equal(result, expected)\n\n def test__cython_agg_general(self):\n ops = [('mean', np.mean),\n ('median', np.median),\n ('var', np.var),\n ('add', np.sum),\n ('prod', np.prod),\n ('min', np.min),\n ('max', np.max),\n ('first', lambda x: x.iloc[0]),\n ('last', lambda x: x.iloc[-1]),\n ('count', np.size),\n ]\n df = DataFrame(np.random.randn(1000))\n labels = np.random.randint(0, 50, size=1000).astype(float)\n\n for op, targop in ops:\n result = df.groupby(labels)._cython_agg_general(op)\n expected = df.groupby(labels).agg(targop)\n try:\n tm.assert_frame_equal(result, expected)\n except BaseException as exc:\n exc.args += ('operation: %s' % op,)\n raise\n\n def test_ops_general(self):\n ops = [('mean', np.mean),\n ('median', np.median),\n ('std', np.std),\n ('var', np.var),\n ('sum', np.sum),\n ('prod', np.prod),\n ('min', np.min),\n ('max', np.max),\n ('first', lambda x: x.iloc[0]),\n ('last', lambda x: x.iloc[-1]),\n ('count', np.size),\n ]\n try:\n from scipy.stats import sem\n except ImportError:\n pass\n else:\n ops.append(('sem', sem))\n df = DataFrame(np.random.randn(1000))\n labels = np.random.randint(0, 50, size=1000).astype(float)\n\n for op, targop in ops:\n result = getattr(df.groupby(labels), op)().astype(float)\n expected = df.groupby(labels).agg(targop)\n try:\n tm.assert_frame_equal(result, expected)\n except BaseException as exc:\n exc.args += ('operation: %s' % op,)\n raise\n\n def test_max_nan_bug(self):\n raw = \"\"\",Date,app,File\n2013-04-23,2013-04-23 00:00:00,,log080001.log\n2013-05-06,2013-05-06 00:00:00,,log.log\n2013-05-07,2013-05-07 00:00:00,OE,xlsx\"\"\"\n df = pd.read_csv(StringIO(raw), parse_dates=[0])\n gb = df.groupby('Date')\n r = gb[['File']].max()\n e = gb['File'].max().to_frame()\n tm.assert_frame_equal(r, e)\n self.assertFalse(r['File'].isnull().any())\n\n def test_nlargest(self):\n a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])\n b = Series(list('a' * 5 + 'b' * 5))\n gb = a.groupby(b)\n r = gb.nlargest(3)\n e = Series([7, 5, 3, 10, 9, 6],\n index=MultiIndex.from_arrays([list('aaabbb'),\n [3, 2, 1, 9, 5, 8]]))\n tm.assert_series_equal(r, e)\n\n def test_nsmallest(self):\n a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])\n b = Series(list('a' * 5 + 'b' * 5))\n gb = a.groupby(b)\n r = gb.nsmallest(3)\n e = Series([1, 2, 3, 0, 4, 6],\n index=MultiIndex.from_arrays([list('aaabbb'),\n [0, 4, 1, 6, 7, 8]]))\n tm.assert_series_equal(r, e)\n\n def test_transform_doesnt_clobber_ints(self):\n # GH 7972\n n = 6\n x = np.arange(n)\n df = 
DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})\n df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})\n\n gb = df.groupby('a')\n result = gb.transform('mean')\n\n gb2 = df2.groupby('a')\n expected = gb2.transform('mean')\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_categorical_two_columns(self):\n\n # https://github.com/pydata/pandas/issues/8138\n d = {'cat': pd.Categorical([\"a\",\"b\",\"a\",\"b\"], categories=[\"a\", \"b\", \"c\"]),\n 'ints': [1, 1, 2, 2],'val': [10, 20, 30, 40]}\n test = pd.DataFrame(d)\n\n # Grouping on a single column\n groups_single_key = test.groupby(\"cat\")\n res = groups_single_key.agg('mean')\n exp = DataFrame({\"ints\":[1.5,1.5,np.nan], \"val\":[20,30,np.nan]},\n index=pd.Index([\"a\", \"b\", \"c\"], name=\"cat\"))\n tm.assert_frame_equal(res, exp)\n\n # Grouping on two columns\n groups_double_key = test.groupby([\"cat\",\"ints\"])\n res = groups_double_key.agg('mean')\n exp = DataFrame({\"val\":[10,30,20,40,np.nan,np.nan],\n \"cat\": [\"a\",\"a\",\"b\",\"b\",\"c\",\"c\"],\n \"ints\": [1,2,1,2,1,2]}).set_index([\"cat\",\"ints\"])\n tm.assert_frame_equal(res, exp)\n\n d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}\n test = pd.DataFrame(d)\n values = pd.cut(test['C1'], [1, 2, 3, 6])\n values.name = \"cat\"\n groups_double_key = test.groupby([values,'C2'])\n\n res = groups_double_key.agg('mean')\n nan = np.nan\n idx = MultiIndex.from_product([[\"(1, 2]\", \"(2, 3]\", \"(3, 6]\"],[1,2,3,4]],\n names=[\"cat\", \"C2\"])\n exp = DataFrame({\"C1\":[nan,nan,nan,nan, 3, 3,nan,nan, nan,nan, 4, 5],\n \"C3\":[nan,nan,nan,nan, 10,100,nan,nan, nan,nan,200,34]}, index=idx)\n tm.assert_frame_equal(res, exp)\n\n\ndef assert_fp_equal(a, b):\n assert (np.abs(a - b) < 1e-12).all()\n\n\ndef _check_groupby(df, result, keys, field, f=lambda x: x.sum()):\n tups = lmap(tuple, df[keys].values)\n tups = com._asarray_tuplesafe(tups)\n expected = f(df.groupby(tups)[field])\n for k, v in compat.iteritems(expected):\n assert(result[k] == v)\n\n\ndef test_decons():\n from pandas.core.groupby import decons_group_index, get_group_index\n\n def testit(label_list, shape):\n group_index = get_group_index(label_list, shape)\n label_list2 = decons_group_index(group_index, shape)\n\n for a, b in zip(label_list, label_list2):\n assert(np.array_equal(a, b))\n\n shape = (4, 5, 6)\n label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100),\n np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100),\n np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100)]\n testit(label_list, shape)\n\n shape = (10000, 10000)\n label_list = [np.tile(np.arange(10000), 5),\n np.tile(np.arange(10000), 5)]\n testit(label_list, shape)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure',\n '-s'], exit=False)\n"
]
| [
[
"numpy.array_equal",
"numpy.tile",
"pandas.compat.OrderedDict",
"pandas.core.common.pprint_thing",
"pandas.util.testing.getSeriesData",
"numpy.random.random",
"pandas.compat.lzip",
"numpy.empty",
"numpy.log",
"pandas.compat.iteritems",
"pandas.DataFrame",
"pandas.util.testing.rands_array",
"pandas.core.groupby._nargsort",
"pandas.util.testing.makeTimeSeries",
"numpy.percentile",
"pandas.util.testing.assert_almost_equal",
"pandas.Index",
"pandas.core.groupby._lexsort_indexer",
"pandas.util.testing.assert_series_equal",
"matplotlib.pyplot.get_fignums",
"pandas.core.index.MultiIndex.from_arrays",
"pandas.util.testing.add_nans",
"pandas.DatetimeIndex",
"pandas.core.config.option_context",
"pandas.Grouper",
"numpy.mean",
"pandas.core.panel.Panel.fromDict",
"pandas.concat",
"pandas.compat.range",
"pandas.period_range",
"pandas.core.api.DataFrame",
"pandas.core.index.Index",
"pandas.util.testing.assert_isinstance",
"numpy.arange",
"pandas.util.testing.close",
"numpy.array",
"numpy.round",
"numpy.random.randn",
"pandas.MultiIndex.from_tuples",
"numpy.random.shuffle",
"numpy.argsort",
"pandas.core.groupby.get_group_index",
"pandas.util.testing.assert_frame_equal",
"numpy.random.seed",
"pandas.date_range",
"numpy.ones",
"pandas.Series",
"pandas.core.api.DataFrame.from_items",
"pandas.tseries.resample.TimeGrouper",
"pandas.core.index.MultiIndex.from_tuples",
"numpy.outer",
"pandas.core.index.MultiIndex",
"numpy.max",
"pandas.util.testing.choice",
"pandas.util.testing.assert_panel_equal",
"pandas.to_datetime",
"matplotlib.use",
"pandas.TimeGrouper",
"numpy.std",
"numpy.asarray",
"pandas.util.testing.makeTimeDataFrame",
"pandas.compat.lmap",
"numpy.sum",
"pandas.tseries.api.DatetimeIndex",
"pandas.util.testing.makePanel",
"numpy.abs",
"pandas.core.index.MultiIndex.from_product",
"pandas.compat.StringIO",
"pandas.qcut",
"pandas.Timestamp",
"pandas.compat.map",
"pandas.core.common._asarray_tuplesafe",
"pandas.util.testing.RNGContext",
"numpy.dtype",
"pandas.compat.long",
"pandas.core.api.Categorical.from_codes",
"numpy.random.normal",
"pandas.util.testing.assert_index_equal",
"numpy.random.randint",
"pandas.bdate_range",
"pandas.core.groupby.decons_group_index",
"numpy.testing.assert_equal",
"pandas.compat.lrange",
"pandas.MultiIndex.from_product",
"pandas.util.testing.getTimeSeriesData",
"pandas.offsets.MonthEnd",
"pandas.cut",
"pandas.compat.zip",
"pandas.core.groupby.numpy_groupby",
"pandas.Categorical",
"pandas.core.api.Categorical",
"pandas.util.testing.assertRaisesRegexp"
]
]
|
muhrin/aiida-vasp | [
"641fdc2ccd40bdd041e59af1fa3e1dcf9b037415"
]
| [
"aiida_vasp/utils/bands.py"
]
| [
"\"\"\"\nUtils for bands structures.\n\n---------------------------\nUtilities for working with band structures. Currently this is legacy and will be\nrewritten or moved.\n\"\"\"\ntry:\n import matplotlib\n matplotlib.use('TKAgg')\n from matplotlib import pyplot as plt\nexcept ImportError:\n raise ImportError('Error: matplotlib must be ' + 'installed to use this functionality')\n\n\ndef get_bs_dims(bands_array):\n \"\"\"\n Get the dimensions from the bands array of a BandsData node.\n\n :param numpy.array bands_array:\n an array with bands as stored in an array.bands data node\n :return: a tuple containing num_bands, num_kp, num_spins.\n if the array is only 2d, num_spins = 0\n :rtype tuple:\n \"\"\"\n bshape = bands_array.shape\n nbd = nkp = nsp = 0\n if len(bshape) == 2:\n nbd = bshape[1]\n nkp = bshape[0]\n elif len(bshape) == 3:\n nbd = bshape[2]\n nkp = bshape[1]\n nsp = bshape[0]\n return nbd, nkp, nsp\n\n\ndef get_kp_labels(bands_node, kpoints_node=None):\n \"\"\"\n Get Kpoint labels with their x-positions in matplotlib compatible format.\n\n A KpointsData node can optionally be given to fall back to if no labels\n are found on the BandsData node. The caller is responsible for ensuring\n the nodes match. This should be the case if you take the kpoints from\n the input and the bands from the\n output of a calculation node.\n\n :param BandsData bands_node:\n The BandsData node will be searched labels first\n :param KpointsData kpoints_node:\n The optional KpointsData node will be searched only if no labels are\n present on the BandsData node. No consistency checks are performed.\n :return: (kpx, kpl), the x-coordinates and text labels\n :rtype: tuple(list[int], list[unicode])\n :raises AttributeError: if neither of the given nodes have a labels\n attribute\n \"\"\"\n kplabs = None\n kpx = []\n kpl = []\n try:\n kplabs = bands_node.labels\n except AttributeError as err:\n if kpoints_node:\n kplabs = kpoints_node.labels\n else:\n raise err\n if kplabs:\n kpx = [i[0] for i in kplabs]\n kpl = [i[1] for i in kplabs]\n for i, kpoints in enumerate(kpl):\n if kpoints == 'G':\n kpl[i] = r'$\\Gamma$'\n return kpx, kpl\n\n\ndef get_efermi(calc):\n \"\"\"Get the fermi energy from a finished calculation.\"\"\"\n efermi = None\n if calc:\n p_res = calc.get_outputs_dict().get('results')\n efermi = p_res and p_res.get_dict().get('efermi')\n return efermi\n\n\ndef get_kp_node(calc):\n kpoints_node = None\n if calc:\n kpoints_node = calc.get_inputs_dict().get('kpoints')\n return kpoints_node\n\n\ndef plot_bstr(bands_node, kpoints_node=None, title=None, efermi=None, use_parent_calc=False, **kwargs):\n \"\"\"\n Use matplotlib to plot the bands stored in a BandsData node.\n\n A KpointsData node can optionally be given as a fallback for\n kpoint labels. The caller is responsible for giving a node\n with matching labels (as in they are in/out nodes of the same\n calculation).\n\n :param BandsData bands_node:\n The BandsData node will be searched labels first\n :param KpointsData kpoints_node:\n The optional KpointsData node will be searched only if no labels are\n present on the BandsData node. 
No consistency checks are performed.\n :return: the matplotlib figure containing the plot\n \"\"\"\n fig = plt.figure()\n title = title or 'Band Structure (pk=%s)' % bands_node.pk\n bands = bands_node.get_bands()\n _, nkp, _ = get_bs_dims(bands)\n plot_bands(bands_node, **kwargs)\n\n parent_calc = None\n if use_parent_calc:\n inputs = bands_node.get_inputs()\n parent_calc = inputs[0] if inputs else None\n\n efermi = get_efermi(parent_calc)\n kpoints_node = get_kp_node(parent_calc)\n\n if efermi:\n plt.hlines(efermi, plt.xlim()[0], nkp - 1, linestyles='dashed')\n plt.yticks(list(plt.yticks()[0]) + [efermi], [str(l) for l in plt.yticks()[0]] + [r'$E_{fermi}$'])\n\n try:\n kpx, kpl = get_kp_labels(bands_node, kpoints_node)\n plt.xticks(kpx, kpl)\n plt.vlines(kpx, plt.ylim()[0], plt.ylim()[1])\n except Exception: # pylint: disable=broad-except\n pass\n\n plt.ylabel('Dispersion')\n plt.suptitle(title)\n return fig\n\n\ndef plot_bands(bands_node, **kwargs):\n \"\"\"Plot a bandstructure node using matplotlib.\"\"\"\n import numpy as np\n\n bands = bands_node.get_bands()\n nbands, nkp, nspin = get_bs_dims(bands)\n if nspin > 0:\n allbands = np.empty((nkp, nbands * nspin))\n for i in range(nspin):\n allbands[:, i * nbands:(i + 1) * nbands] = bands[i]\n bands = allbands\n\n if 'colors' in kwargs:\n import itertools\n colors = itertools.cycle(kwargs.pop('colors'))\n for b_idx in range(bands.shape[1]):\n plt.plot(bands[:, b_idx], color=colors.next(), **kwargs) # pylint: disable=no-member\n else:\n plt.plot(bands, **kwargs)\n"
]
| [
[
"matplotlib.use",
"numpy.empty",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks"
]
]
|
kieranboyce/asl-ml-immersion | [
"097c31009af9c5b6708b45e25f742a3052e2c3d6"
]
| [
"notebooks/kubeflow_pipelines/pipelines/solutions/pipeline/helper_components.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n\"\"\"Helper components.\"\"\"\n\nfrom typing import NamedTuple\n\n\ndef retrieve_best_run(\n project_id: str, job_id: str\n) -> NamedTuple(\n \"Outputs\", [(\"metric_value\", float), (\"alpha\", float), (\"max_iter\", int)]\n):\n \"\"\"Retrieves the parameters of the best Hypertune run.\"\"\"\n\n # pylint: disable-next=import-outside-toplevel\n from googleapiclient import discovery, errors\n\n ml = discovery.build(\"ml\", \"v1\")\n\n job_name = f\"projects/{project_id}/jobs/{job_id}\"\n request = ml.projects().jobs().get(name=job_name)\n\n try:\n response = request.execute()\n except errors.HttpError as err:\n print(err)\n\n print(response)\n\n best_trial = response[\"trainingOutput\"][\"trials\"][0]\n\n metric_value = best_trial[\"finalMetric\"][\"objectiveValue\"]\n alpha = float(best_trial[\"hyperparameters\"][\"alpha\"])\n max_iter = int(best_trial[\"hyperparameters\"][\"max_iter\"])\n\n return (metric_value, alpha, max_iter)\n\n\ndef evaluate_model(\n dataset_path: str, model_path: str, metric_name: str\n) -> NamedTuple(\n \"Outputs\",\n [\n (\"metric_name\", str),\n (\"metric_value\", float),\n (\"mlpipeline_metrics\", \"Metrics\"),\n ],\n):\n \"\"\"Evaluates a trained sklearn model.\"\"\"\n\n # pylint: disable=import-outside-toplevel\n import json\n import pickle\n import subprocess\n import sys\n\n import pandas as pd\n from sklearn.metrics import accuracy_score, recall_score\n\n df_test = pd.read_csv(dataset_path)\n\n X_test = df_test.drop(\"Cover_Type\", axis=1) # pylint: disable=invalid-name\n y_test = df_test[\"Cover_Type\"]\n\n # Copy the model from GCS\n model_filename = \"model.pkl\"\n gcs_model_filepath = f\"{model_path}/{model_filename}\"\n print(gcs_model_filepath)\n subprocess.check_call(\n [\"gsutil\", \"cp\", gcs_model_filepath, model_filename], stderr=sys.stdout\n )\n\n with open(model_filename, \"rb\") as model_file:\n model = pickle.load(model_file)\n\n y_hat = model.predict(X_test)\n\n if metric_name == \"accuracy\":\n metric_value = accuracy_score(y_test, y_hat)\n elif metric_name == \"recall\":\n metric_value = recall_score(y_test, y_hat)\n else:\n metric_name = \"N/A\"\n metric_value = 0\n\n # Export the metric\n metrics = {\n \"metrics\": [{\"name\": metric_name, \"numberValue\": float(metric_value)}]\n }\n\n return (metric_name, metric_value, json.dumps(metrics))\n"
]
| [
[
"pandas.read_csv",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
]
|
ramseylab/cerenkov3 | [
"c7746d80516bc847d0355cda09f5407291e0f952"
]
| [
"cerenkov3_data/util_uscs.py"
]
| [
"import pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy.pool import NullPool\n\n_REGULAR_CHR = {\"chr{}\".format(x) for x in (list(range(1, 23)) + [\"X\", \"Y\"])} # range(1,23) = 1,2,...,22\n\n\nclass GenomeBrowserClient:\n # If you are going to use the `local_hg19` configuration, make sure you have created such a user in MySQL:\n # For MySQL 5.6\n # GRANT SELECT PRIVILEGES ON hg19.* To 'bud'@'localhost' IDENTIFIED BY 'earth';\n # GRANT SELECT PRIVILEGES ON hgmd_pro.* To 'bud'@'localhost' IDENTIFIED BY 'earth';\n # For MySQL 5.7\n # CREATE USER 'bud'@'localhost' IDENTIFIED BY 'earth';\n # GRANT SELECT ON hg19.* TO 'bud'@'localhost';\n # GRANT SELECT ON hgmd_pro.* TO 'bud'@'localhost';\n # FLUSH PRIVILEGES;\n __db_url = dict(\n local_hg19=dict(\n drivername='mysql+pymysql',\n host='localhost',\n port='3306',\n username='bud',\n password='earth',\n database='hg19',\n query={'charset': 'utf8'}\n ),\n\n remote_hg19=dict(\n drivername='mysql+pymysql',\n host='genome-mysql.cse.ucsc.edu',\n port='3306',\n username='genome',\n password='',\n database='hg19',\n query={'charset': 'utf8'}\n ),\n )\n\n def __init__(self, config_key):\n # For `poolclass`, see http://stackoverflow.com/a/8705750\n self.db = create_engine(URL(**GenomeBrowserClient.__db_url[config_key]), poolclass=NullPool)\n self.conn = self.db.connect()\n\n # Subtraction between integer values, where one is of type UNSIGNED, produces an unsigned result by default.\n # If the difference is negative, an error results because it must be unsigned.\n # Coordinates are unsigned int. We'll use subtraction between coordinates to get TSS distances,\n # so we must enable this mode.\n self.conn.execute(\"SET sql_mode = 'NO_UNSIGNED_SUBTRACTION'\")\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n self.db.dispose()\n\n def select_tfbs(self, rsid):\n snps = \", \".join(\"'{}'\".format(x) for x in rsid)\n chroms = \", \".join(\"'{}'\".format(x) for x in _REGULAR_CHR)\n clazz = \"'single'\"\n\n query = (\"SELECT s.name, GROUP_CONCAT(tf.name) as tfName \"\n \"FROM snp146 as s \"\n \"LEFT OUTER JOIN wgEncodeRegTfbsClusteredV3 as tf \"\n \"ON tf.bin = s.bin \"\n \" AND s.chromStart BETWEEN tf.chromStart AND tf.chromEnd - 1 \"\n \" AND tf.chrom = s.chrom \"\n \"WHERE s.name IN ({snps}) AND \"\n \" s.chrom IN ({chroms}) AND \"\n \" s.class = {clazz} \"\n \"GROUP BY s.name\".format(snps=snps, chroms=chroms, clazz=clazz))\n\n rows = self.conn.execute(query)\n\n df = pd.DataFrame(rows.fetchall())\n df.columns = rows.keys()\n\n return df\n\n\ndef binary_encode_tfbs(dfm, target_colname=\"tfName\", value_sep=',', dest_colname_prefix=None):\n \"\"\"\n Binary-encode categorical column `target_colname` separated by `value_sep`\n in data frame `dfm` to multiple binary columns with the same prefix `dest_colname_prefix`\n\n MySQL returns `GROUP_CONCAT(tf.name)` in `tfName` column in a comma-separated string.\n E.g. 
`ARID3A,ATF1,ATF2` stands for 3 TFs\n This function separates this string by commas and 3 new columns,\n `tf_ARID3A`, `tf_ATF1` and `tf_ATF2` would be 1 for this SNP\n\n :param dfm: the data frame\n :param target_colname: the name of the categorical column whose values would be encoded\n :param value_sep: the separator of the categorical values\n :param dest_colname_prefix: the prefix of the binary columns after one-hot encoding\n :return: the binary encoded dataframe\n \"\"\"\n dummies = dfm.loc[:, target_colname].str.get_dummies(sep=value_sep)\n\n if dest_colname_prefix is not None:\n # Add a prefix to all column names\n dummies = dummies.add_prefix(dest_colname_prefix)\n\n dfm = pd.concat([dfm, dummies], axis=1).drop(target_colname, axis=1)\n\n return dfm\n"
]
| [
[
"pandas.concat"
]
]
|
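binary_encode_tfbs in the row above turns the comma-separated tfName column into one 0/1 column per transcription factor using Series.str.get_dummies and pandas.concat. A small self-contained sketch of that pattern, with invented SNP names and TF strings:

# Toy illustration of the get_dummies/concat pattern used in binary_encode_tfbs;
# the SNP names and TF strings here are invented for the example.
import pandas as pd

dfm = pd.DataFrame({
    "name": ["rs1", "rs2"],
    "tfName": ["ARID3A,ATF1,ATF2", "ATF1"],
})

dummies = dfm["tfName"].str.get_dummies(sep=",").add_prefix("tf_")
encoded = pd.concat([dfm, dummies], axis=1).drop("tfName", axis=1)
print(encoded)
# columns: name, tf_ARID3A, tf_ATF1, tf_ATF2 with 0/1 indicators per SNP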
Addy81/BroadWork | [
"9af5316b52154f389f6e84078efc9d37c41cdba4"
]
| [
"bin/vcf_compare_GQ_test.py"
]
| [
"#!/usr/bin/python3\n#\n#\n#\n#\n# Adriana Toutoudaki (September 2019) contact: [email protected]\n\nimport vcf\nimport os, sys\nimport numpy as np\nfrom vcf.utils import walk_together\n\nclass Comparison:\n\n def __init__(self):\n self.matches = 0\n self.diff_metrics = 0\n self.diff = 0\n self.no_format_count = 0\n self.no_formats = []\n self.truth_GQ = 0\n self.dev_GQ = 0\n self.diff_GQ = 0\n \n def total_count(self):\n \"\"\" Returns total count of calls per vcf.\"\"\"\n self.tc = self.matches + self.diff + self.no_format_count +self.diff_metrics\n\n return self.tc\n\n def get_stats(self):\n \"\"\"\n Calculates the percentage of calls with different depth and different records.\n Returns those values.\n \"\"\"\n depth_stats = (self.diff_metrics / self.tc) * 100\n diff_stats = (self.diff/ self.tc) * 100\n\n return depth_stats,diff_stats\n\n def output_no_format(self):\n \"\"\" Currently not used in the code below\"\"\"\n \n with open('reads_missing_format_field.txt','w+') as f:\n header_line = '\\t'.join(['Chrom','Pos','Ref','Alt','INFO'])\n f.write(header_line)\n\n for read in self.no_formats:\n line = '\\t'.join([read[0].CHROM,str(read[0].POS),read[0].REF,read[0].ALT])\n f.write(line)\n\n def print_metrics(self,depth_stats,diff_stats):\n \"\"\" Prints summary of differences.\"\"\"\n print ('Total\\tDiffDP\\tDiffRec')\n print('{}\\t{}\\t{}'.format(self.tc,self.diff_metrics,self.diff))\n print('{}\\t{}\\t{}'.format('100%',depth_stats,diff_stats))\n print ('\\nGQ<=20\\tTruth\\tDev')\n print ('\\t{}\\t{}'.format(self.truth_GQ,self.dev_GQ))\n print ('Different GQ:{} or {}%'.format(self.diff_GQ, (self.diff_GQ/self.tc)*100))\n\ndef calculate_hist(lst,bin_edges):\n \"\"\"\n param1: list of DP differences or DP % change gathered during comparison\n param2: desired bin edges, for DP difference histogram [1,2,3,4,5,6,7,8,9,max(DP_difference_list)] \n for % difference histogram [0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,max(percent_difference_list)])\n prints: Histogram, bin edges and % off calls in each bin. \n \"\"\"\n abs_lst = [abs(x) for x in lst]\n\n #Print the outliers to emphasise the contents of the last bin\n outliers = []\n for x in abs_lst:\n if x >8:\n outliers.append(x)\n \n if max(abs_lst) < 8: \n counts, bin_edges = np.histogram(abs_lst)\n per_lst = [round(count*100/sum(counts),2) for count in counts]\n else:\n counts, bin_edges = np.histogram(abs_lst,bins=bin_edges)\n per_lst = [round(count*100/sum(counts),2) for count in counts]\n \n print ('\\nHistogram:', counts)\n print ('Bins:',bin_edges)\n print ('Percentage of values in each bin:', per_lst)\n \n print ('Values in the last bin:',outliers)\n\ndef main():\n\n\n #dev_vcf = '/Users/atoutoud/Projects/testCompare/data/NA12891.dev_short.vcf'\n #truth_vcf = '/Users/atoutoud/Projects/testCompare/data/NA12891.truth_short.vcf'\n\n truth_vcf = sys.argv[1]\n dev_vcf = sys.argv[2]\n\n #dev_reader= vcf.Reader(open(dev_vcf, 'r'))\n #truth_reader = vcf.Reader(open(truth_vcf,'r'))\n\n print ('Sample Comparison')\n dev_reader= vcf.Reader(filename=dev_vcf)\n truth_reader = vcf.Reader(filename=truth_vcf)\n\n #Checks if a filename is provided. 
pyVCF looks for the filename in the header line, for replicates of the same sample with different filenames\n # the correct ones should be provided otherwise it will fail.\n if len(sys.argv) == 4:\n sample = sys.argv[3]\n else:\n sample = os.path.basename(truth_vcf).split(os.extsep)[0]\n \n print (sample)\n summary = Comparison()\n records_dont_match = []\n call_difference = []\n percent_difference = []\n DP_range = []\n\n #Walk_together is a pyVCF inbuilt function to read two vcfs at the same time.\n for dev_rec,truth_rec in walk_together(dev_reader,truth_reader):\n # A record corresponds to [CHROM,POS,REF,ALT], if the same it checks the metrics differences.\n if dev_rec == truth_rec:\n try:\n #If the DP is different between the records.\n if dev_rec.genotype(sample)['DP'] != truth_rec.genotype(sample)['DP']:\n summary.diff_metrics +=1 \n #count_metrics += 1\n print('')\n print (dev_rec.CHROM,dev_rec.POS, dev_rec.REF,dev_rec.ALT,dev_rec.QUAL)\n print ('--------------------------------------------------------------')\n print ('\\t'.join(dev_rec.FORMAT.split(':')))\n \n for entry in truth_rec.genotype(sample).data:\n print (entry,end = '\\t')\n print('')\n \n for entry in dev_rec.genotype(sample).data:\n print (entry,end='\\t')\n \n true_DP = truth_rec.genotype(sample)['DP']\n test_DP = dev_rec.genotype(sample)['DP']\n DP_range.append(true_DP)\n DP_range.append(test_DP)\n\n if true_DP == 0:\n difference = 0 # had to set, as the % different calculation divides by the true_DP, so if that is 0 it breaks\n else:\n difference = round(abs((test_DP-true_DP)/true_DP*100),4)\n \n percent_difference.append(difference)\n\n DP_diff = true_DP-test_DP\n call_difference.append(DP_diff)\n \n print ('\\nDP difference {}'.format(DP_diff))\n print (difference,'%')\n print ('')\n\n if dev_rec.genotype(sample)['GQ'] <= 20:\n summary.dev_GQ +=1\n elif truth_rec.genotype(sample)['GQ'] <= 20:\n summary.truth_GQ +=1\n\n else: \n summary.matches +=1\n \n if dev_rec.genotype(sample)['GQ'] != truth_rec.genotype(sample)['GQ']:\n summary.diff_GQ +=1\n \n #\n except AttributeError: \n summary.no_format_count +=1\n summary.no_formats.append([dev_rec,dev_rec.INFO])\n print ('No format fields {} at position:{}'.format(dev_rec.CHROM,dev_rec.POS))\n \n else:\n #count_no_match +=1\n summary.diff +=1\n \n #Stores the different values so they can be explorted all together at the end. 
\n if truth_rec is None:\n records_dont_match.append({\"truth\":(truth_rec),\"dev\":(dev_rec.CHROM,dev_rec.POS,dev_rec.REF,dev_rec.ALT)})\n elif dev_rec is None:\n records_dont_match.append({\"truth\":(truth_rec.CHROM,truth_rec.POS,truth_rec.REF,truth_rec.ALT),\"dev\":(dev_rec)})\n else:\n records_dont_match.append({\"truth\":(truth_rec.CHROM,truth_rec.POS,truth_rec.REF,truth_rec.ALT),\"dev\":(dev_rec.CHROM,dev_rec.POS,dev_rec.REF,dev_rec.ALT)})\n #print ('** Records do not match **',dev_rec,truth_rec)\n\n\n summary.total_count()\n\n stats1,stats2 = summary.get_stats()\n #summary.output_no_format()\n \n #Prints the summary metrics for the entirety of the vcf files\n summary.print_metrics(round(stats1,4),round(stats2,4))\n\n print (\"\\nRecords that didn't match first is truth, second is dev\")\n for i in records_dont_match:\n print (i)\n\n #Outputs histogram values for DP difference and percent difference.\n calculate_hist(call_difference,[1,2,3,4,5,6,7,8,9,max(call_difference)])\n print ('***Please note last bin contains values of difference greater than 8 calls***')\n calculate_hist(percent_difference,[0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,max(percent_difference)])\n print ('***Please note last bin contains entities with a percent change greater than 5% ***')\n\n\n print ('\\nDP range:',min(DP_range),max(DP_range))\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.histogram"
]
]
|
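calculate_hist in the row above relies on numpy.histogram with explicit bin edges so that everything beyond a DP difference of 8 (or a 5% change) collapses into a single outlier bin. A short sketch of that binning on made-up DP differences:

# Sketch of the binning used in calculate_hist: explicit edges with the last
# edge set to the data maximum so large outliers land in one final bin.
# The values in dp_diff are invented for illustration.
import numpy as np

dp_diff = [1, 1, -2, 3, 3, 4, 7, 15, 42]
abs_diff = [abs(x) for x in dp_diff]

bin_edges = [1, 2, 3, 4, 5, 6, 7, 8, 9, max(abs_diff)]
counts, edges = np.histogram(abs_diff, bins=bin_edges)
percent = [round(c * 100 / counts.sum(), 2) for c in counts]

print("Histogram:", counts)
print("Bins:", edges)
print("Percentage of values in each bin:", percent)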
mbursa/sim5 | [
"b2f244799b64540de10333eecb631aa316af845b"
]
| [
"python/sim5diskspectrum.py"
]
| [
"# Spectral models for local emission of accretion disks.\n#\n# Module provides classes that define spectral model for radiation\n# emerging from an accretion disk.\n# \n# This file is a part of SIM5 library. \n# See README and LICENCE file for details.\n\n\nfrom __future__ import division\nimport sys\nimport numpy as np\n\n\n\nclass DiskSpectrum:\n def __init__(self):\n pass\n #end of def\n\n\n def spectrum(self, T, m, f, E):\n \"\"\"\n Computes spectrum for specified parameters by interpolating TLUSTY\n spectra on given energy grid.\n\n Makes interpolation on the parameter grid and returns weighted spectrum\n projected onto a given energy grid `E` for emission angle `m`.\n\n If [T,S,Q] point lies outside the spectral grid, a failover black-body spectrum\n is returned with hardening factor f.\n\n Args:\n T: temperature [K]\n m: cosine of emission angle\n f: hardening factor to be used in case of black-body failover\n E: array of energies to which to project the spectrum [keV]\n Returns:\n specific intensity grid [erg/cm2/s/keV/srad]\n \"\"\"\n\n return np.zeros(len(E))\n #end of def\n#end of class\n\n\n\nclass DiskSpectrum_BlackBody(DiskSpectrum):\n def __init__(self):\n pass\n #end of def\n\n\n def spectrum(self, T, m, f, E):\n \"\"\"\n Computes spectrum for specified parameters by evaluating Planck formula\n for given temperature.\n\n Returns specific intensity Iv of a black body using Planck formula. The spectrum\n is modifies by a hardening factor, which redistributes photons from softer\n to higher energies while keeping the total flux, and by limb-darkening effect,\n which redistributed photons between emission angles (more emission along surface normal).\n\n Args:\n T: temperature [K]\n m: cosine of emission angle (if m<0 then isotropic emission is assumed)\n f: hardening factor\n E: array of energies to which to project the spectrum [keV]\n Returns:\n specific intensity [erg/cm2/s/keV/srad]\n \"\"\"\n\n planck_h = 6.626069e-27 # Planck constant [erg.s]\n kev2freq = 2.417990e+17 # keV->Hz (1e3*electronvolt/planck_h)\n speed_of_light2 = 8.987554e+20 # square of speed of light [cm^2/s^2]\n boltzmann_k = 1.380650e-16 # Boltzmann constant [erg/K]\n\n if (T < 1e2): return np.zeros(len(E))\n # make spectrum using black-body formula with hardening factor & limb darkening\n limbf = 0.5+0.75*m if (m>=0.0) else 1.0\n\n # calc Planck spectrum in units [erg/cm^2/s/keV/srad]\n with np.errstate(over='ignore'):\n Iv = limbf*2.0*planck_h*(kev2freq*E)**3/speed_of_light2/(f**4) * 1./(np.exp((planck_h*kev2freq*E)/(boltzmann_k*f*T))-1.0) * kev2freq\n\n return Iv\n #end of def\n#end of class\n\n\n\n\n"
]
| [
[
"numpy.exp",
"numpy.errstate"
]
]
|
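DiskSpectrum_BlackBody.spectrum in the row above evaluates the Planck formula on a keV energy grid inside np.errstate(over='ignore'), so the exponential overflow at high E/kT just saturates the denominator instead of emitting warnings. A condensed sketch of the same evaluation, with the constants copied from the class and the hardening factor and limb darkening left out; the temperature and energy grid are arbitrary example values:

# Condensed Planck evaluation in the spirit of DiskSpectrum_BlackBody.
import numpy as np

planck_h = 6.626069e-27         # Planck constant [erg s]
kev2freq = 2.417990e+17         # keV -> Hz
speed_of_light2 = 8.987554e+20  # c^2 [cm^2/s^2]
boltzmann_k = 1.380650e-16      # Boltzmann constant [erg/K]

T = 1.0e7                         # K, example temperature
E = np.linspace(0.1, 50.0, 500)   # keV, example energy grid

# overflow in exp() only drives 1/(exp(x)-1) to zero, so it is safe to ignore
with np.errstate(over='ignore'):
    Iv = 2.0 * planck_h * (kev2freq * E)**3 / speed_of_light2 \
         / (np.exp(planck_h * kev2freq * E / (boltzmann_k * T)) - 1.0) * kev2freq

print(Iv.max())  # specific intensity peak [erg/cm^2/s/keV/srad]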
ujos89/DualRL | [
"0833c0885a29de477f3af6c5f9b871cca64c068a"
]
| [
"utils/vocab.py"
]
| [
"\"\"\"Standalone script to generate word vocabularies from monolingual corpus.\"\"\"\n\nimport argparse\n\nfrom utils import constants\nfrom opennmt import tokenizers\nfrom opennmt import utils\nimport tensorflow as tf\n\n\ndef build_vocab_from_file(src_file, save_path, min_frequency=5, size=0, without_sequence_tokens=False):\n \"\"\"\n Generate word vocabularies from monolingual corpus.\n :param src_file: Source text file.\n :param save_path: Output vocabulary file.\n :param min_frequency: Minimum word frequency. # for yelp and amazon, min_frequency=5\n :param size: Maximum vocabulary size. If = 0, do not limit vocabulary.\n :param without_sequence_tokens: If set, do not add special sequence tokens (start, end) in the vocabulary.\n :return: No return.\n \"\"\"\n\n special_tokens = [constants.PADDING_TOKEN]\n if not without_sequence_tokens:\n special_tokens.append(constants.START_OF_SENTENCE_TOKEN)\n special_tokens.append(constants.END_OF_SENTENCE_TOKEN)\n\n vocab = utils.Vocab(special_tokens=special_tokens)\n if isinstance(src_file, list):\n for data_file in src_file:\n vocab.add_from_text(data_file)\n else:\n vocab.add_from_text(src_file)\n vocab = vocab.prune(max_size=size, min_frequency=min_frequency)\n vocab.serialize(save_path)\n\n\ndef load_vocab(vocab_file):\n \"\"\"Returns a lookup table and the vocabulary size.\"\"\"\n\n def count_lines(filename):\n \"\"\"Returns the number of lines of the file :obj:`filename`.\"\"\"\n with open(filename, \"rb\") as f:\n i = 0\n for i, _ in enumerate(f):\n pass\n return i + 1\n\n vocab_size = count_lines(vocab_file) + 1 # Add UNK.\n vocab = tf.contrib.lookup.index_table_from_file(\n vocab_file,\n vocab_size=vocab_size - 1,\n num_oov_buckets=1)\n return vocab, vocab_size\n\n\ndef load_vocab_dict(vocab_file):\n \"\"\"Returns a dictionary and the vocabulary size.\"\"\"\n\n def count_lines(filename):\n \"\"\"Returns the number of lines of the file :obj:`filename`.\"\"\"\n with open(filename, \"rb\") as f:\n i = 0\n for i, _ in enumerate(f):\n pass\n return i + 1\n\n # vocab_size = count_lines(vocab_file) + 1 # Add UNK.\n\n vocab_dict = {}\n vocab_size = 0\n with open(vocab_file) as f:\n for line in f:\n word = line.strip()\n vocab_dict[word] = vocab_size\n vocab_size += 1\n vocab_dict[constants.UNKNOWN_TOKEN] = vocab_size\n vocab_size += 1\n return vocab_dict, vocab_size\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"data\", nargs=\"+\",\n help=\"Source text file.\")\n parser.add_argument(\n \"--save_vocab\", required=True,\n help=\"Output vocabulary file.\")\n parser.add_argument(\n \"--min_frequency\", type=int, default=1,\n help=\"Minimum word frequency.\")\n parser.add_argument(\n \"--size\", type=int, default=0,\n help=\"Maximum vocabulary size. 
If = 0, do not limit vocabulary.\")\n parser.add_argument(\n \"--without_sequence_tokens\", default=False, action=\"store_true\",\n help=\"If set, do not add special sequence tokens (start, end) in the vocabulary.\")\n tokenizers.add_command_line_arguments(parser)\n args = parser.parse_args()\n\n tokenizer = tokenizers.build_tokenizer(args)\n\n special_tokens = [constants.PADDING_TOKEN]\n if not args.without_sequence_tokens:\n special_tokens.append(constants.START_OF_SENTENCE_TOKEN)\n special_tokens.append(constants.END_OF_SENTENCE_TOKEN)\n\n vocab = utils.Vocab(special_tokens=special_tokens)\n for data_file in args.data:\n vocab.add_from_text(data_file, tokenizer=tokenizer)\n vocab = vocab.prune(max_size=args.size, min_frequency=args.min_frequency)\n vocab.serialize(args.save_vocab)\n\n\ndef test_vocab():\n import tensorflow as tf\n import numpy as np\n import os\n from common_options import load_common_arguments\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n # Load global vocab\n args = load_common_arguments()\n global_vocab, global_vocab_size = load_vocab(args.global_vocab_file)\n\n vocab, vocab_size = load_vocab_dict(args.global_vocab_file)\n\n assert global_vocab_size == vocab_size\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n i = 0\n ks = vocab.keys()\n vs = vocab.values()\n\n v1 = sess.run(global_vocab.lookup(tf.convert_to_tensor(ks)))\n for i in range(len(vs)):\n assert vs[i] == v1[i]\n\n\nif __name__ == \"__main__\":\n main()\n test_vocab()\n"
]
| [
[
"tensorflow.convert_to_tensor",
"tensorflow.Session",
"tensorflow.tables_initializer",
"tensorflow.contrib.lookup.index_table_from_file",
"tensorflow.global_variables_initializer"
]
]
|
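The vocab utilities in the row above build the same token-to-index mapping two ways: load_vocab wraps tf.contrib.lookup.index_table_from_file (TensorFlow 1.x only, with one OOV bucket), while load_vocab_dict does it in plain Python. The sketch below mirrors only the plain-Python path, assuming a one-token-per-line vocab.txt (an example file name) and a stand-in for constants.UNKNOWN_TOKEN:

# Plain-Python sketch of load_vocab_dict above: one token per line in
# vocab.txt (assumed example file), plus a trailing UNK entry.
UNKNOWN_TOKEN = "<unk>"   # stand-in for constants.UNKNOWN_TOKEN

vocab_dict = {}
with open("vocab.txt") as f:
    for line in f:
        vocab_dict[line.strip()] = len(vocab_dict)
vocab_dict[UNKNOWN_TOKEN] = len(vocab_dict)
vocab_size = len(vocab_dict)

# unknown words fall back to the UNK index, matching the num_oov_buckets=1
# behaviour of the TF lookup table in load_vocab
print(vocab_size, vocab_dict.get("the", vocab_dict[UNKNOWN_TOKEN]))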
manisharmagarg/qymatix | [
"0dc240970359429ae5105db79f9aebf1a99ba6fd"
]
| [
"api/qymatix/analytics/new_insights.py"
]
| [
"import datetime\nimport logging\n\nimport numpy as np\n\nfrom api.infrastructure.mysql import connection\nfrom api.qymatix import results\nfrom api.qymatix.analytics.performance_analytics import kam\nfrom api.qymatix.analytics.performance_analytics import multiparam\nfrom api.qymatix.analytics.sales_analytics import sales\n\nlogger = logging.getLogger(__name__)\n\n\ndef getInsights(dbname, account='all', raw=False, local=False, dbusername='', passwd='', username=''):\n ''' Reads result's database, manipulate the data and returns it.\n '''\n\n dbname = 'data_{}'.format(dbname)\n dbname_results = dbname\n dbname_tasks = dbname.replace('tasks', 'data')\n\n # username = ''\n # passwd = ''\n\n data = dict()\n\n # try:\n # account = account.decode('utf-8')\n # except:\n # pass\n # account = account.encode('latin-1')\n\n try:\n mysql_connection = connection.MySQLConnection(dbname_results)\n con = mysql_connection.connect()\n cur = con.cursor()\n\n if account == 'all':\n data['plans per account'] = kam.plansPerAccount(cur, username=username)\n data['actions per account'] = kam.actionsPerAccount(cur, username=username)\n\n data['activity goals'] = kam.activityGoals(cur, account=account, username=username)\n data['total sales plans'] = kam.totalSalesPlans(cur, account=account, username=username)\n data['total plan goals'] = kam.totalPlanGoals(cur, account=account, username=username)\n\n data['actions per day'] = kam.actionsPerDay(cur, account=account, username=username)\n data['actions per month'] = kam.actionsPerMonth(cur, account=account, username=username)\n data['actions per year'] = kam.actionsPerYear(cur, account=account, username=username)\n\n data['goals per quarter'] = kam.goalsPerQuarter(cur, account=account, username=username)\n data['total calls goal'] = kam.totalCallsGoal(cur, account=account, username=username)\n data['total visits goal'] = kam.totalVisitsGoal(cur, account=account, username=username)\n data['total offers goal'] = kam.totalOffersGoal(cur, account=account, username=username)\n\n month = str(datetime.datetime.now().month)\n try:\n data['actions this month'] = data['actions per month'][month]\n except:\n data['actions this month'] = 0\n\n data['actions QTD'] = kam.actionsQTD(cur, account=account, username=username)\n data['actions MTD'] = kam.actionsMTD(cur, account=account, username=username)\n data['actions YTD'] = kam.actionsYTD(cur, account=account, username=username)\n\n today = str(datetime.datetime.now()).split(\" \")[0]\n firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))\n wd = np.busday_count(firstday, today) * 1.0\n data['actions YTD date ratio'] = round(data['actions YTD'] / wd, 2)\n\n except Exception as e:\n print(e)\n # print(\"!!!!>>>>>\")\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n\n if account == 'all':\n data['plans per account'] = \"{}\"\n data['actions per account'] = \"{}\"\n\n data['activity goals'] = 0\n data['total sales plans'] = 0\n data['total plan goals'] = 0\n data['actions per day'] = 0\n data['actions per month'] = 0\n data['actions per year'] = 0\n data['goals per quarter'] = 0\n data['total calls goal'] = 0\n data['total visits goal'] = 0\n data['total offers goal'] = 0\n data['actions this month'] = 0\n data['actions QTD'] = 0\n data['actions MTD'] = 0\n data['actions YTD'] = 0\n data['actions YTD date ratio'] = 0\n\n raise\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n try:\n mysql_connection = connection.MySQLConnection(dbname_tasks)\n con = 
mysql_connection.connect()\n cur = con.cursor()\n\n today = datetime.datetime.now()\n\n # list of all accounts\n if account == 'all':\n data['accounts'] = kam.accounts(cur, username)\n # active accounts and sales in the las 3 months\n data['active accounts'] = kam.activeAccounts(cur, username=username)\n hoy = datetime.datetime.now()\n _tmb = datetime.datetime(year=hoy.year, month=hoy.month, day=hoy.day)\n try:\n data['active accounts growth'] = 100. * (len(data['active accounts'].keys()) / len(\n kam.activeAccounts(cur, account, username, today=_tmb).keys()) - 1)\n except:\n data['active accounts growth'] = 0\n\n data['lost accounts'] = [a for a in data['accounts'] if a not in data['active accounts'].keys()]\n try:\n data['actions-accounts ratio'] = round(float(data['actions YTD']) / len(data['accounts']), 2)\n except:\n data['actions-accounts ratio'] = 0.0\n try:\n data['actions-active accounts ratio'] = round(\n float(data['actions YTD']) / len(data['active accounts'].keys()), 2)\n except:\n data['actions-active accounts ratio'] = 0.0\n try:\n data['penetration ratio'] = round(\n 100 * float(len(data['active accounts'].keys())) / len(data['accounts']), 2)\n except:\n data['penetration ratio'] = 0.0\n\n try:\n data['sales YTD'] = round(sales.salesYTD(cur, account=account, username=username), 2)\n except:\n data['sales YTD'] = 0.0\n try:\n data['margin YTD'] = round(sales.salesYTD(cur, param='margin', account=account, username=username), 2)\n except:\n data['margin YTD'] = 0.0\n try:\n data['sales QTD'] = round(sales.salesQTD(cur, year=today.year, account=account, username=username), 2)\n except:\n data['sales QTD'] = 0.0\n try:\n data['margin QTD'] = round(\n sales.salesQTD(cur, param='margin', year=today.year, account=account, username=username), 2)\n except:\n data['margin QTD'] = 0.0\n try:\n data['sales MTD'] = round(sales.salesMTD(cur, account=account, username=username), 2)\n except:\n data['sales MTD'] = 0.0\n data['sales per quarter'] = sales.salesPerQuarter(cur, param='price', year=today.year, account=account,\n username=username)\n data['margin per quarter'] = sales.salesPerQuarter(cur, param='margin', year=today.year, account=account,\n username=username)\n\n data['monthly sales'] = multiparam.monthlyParam(cur, param='price', year=today.year, account=account,\n username=username)\n data['monthly sales last year'] = multiparam.monthlyParam(cur, param='price', year=today.year - 1,\n account=account, username=username)\n\n data['monthly margin'] = multiparam.monthlyParam(cur, param='margin', year=today.year, account=account,\n username=username)\n data['monthly margin last year'] = multiparam.monthlyParam(cur, param='margin', year=today.year - 1,\n account=account, username=username)\n\n s = 0\n for d in data['monthly sales last year']:\n s += d['sales']\n data['sales last year'] = round(s, 2)\n\n try:\n data['sales growth YTD'] = round(100 * data['sales YTD'] / data['sales last year'], 0)\n except:\n data['sales growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['sales growth month'] = round(\n data['monthly sales'][today.month] / data['monthly sales'][today.month - 1], 2)\n except:\n data['sales growth month'] = 0.0\n else:\n for l in data['monthly sales last year']:\n if l['month'] == 12:\n sb = l['sales']\n for l in data['monthly sales']:\n if l['month'] == 12:\n cs = l['sales']\n try:\n data['sales growth month'] = round(cs / sb, 2)\n except:\n data['sales growth month'] = 0.0\n\n s = 0\n for d in data['monthly margin last year']:\n s += d['margin']\n 
data['margin last year'] = round(s, 2)\n\n try:\n data['margin growth YTD'] = round(100 * data['margin YTD'] / data['margin last year'], 0)\n except:\n data['margin growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['margin growth month'] = round(\n data['monthly margin'][today.month] / data['monthly margin'][today.month - 1], 2)\n except:\n data['margin growth month'] = 0.0\n else:\n for l in data['monthly margin last year']:\n if l['month'] == 12:\n sb = l['margin']\n for l in data['monthly margin']:\n if l['month'] == 12:\n cs = l['margin']\n try:\n data['margin growth month'] = round(cs / sb, 2)\n except:\n data['margin growth month'] = 0.0\n\n # SALES\n currentQuarter = (today.month - 1) // 3 + 1\n salesCurrentQuarter = data['sales per quarter'][currentQuarter]\n if currentQuarter == 1:\n salesLastQuarter = round(\n sales.salesPerQuarter(cur, year=today.year - 1, param='price', account=account, username=username)[4],\n 2)\n else:\n salesLastQuarter = round(data['sales per quarter'][currentQuarter - 1], 2)\n\n try:\n data['sales growth QTD'] = round(100 * salesCurrentQuarter / salesLastQuarter, 2)\n except:\n data['sales growth QTD'] = 0.0\n\n # MARGIN\n currentQuarter = (today.month - 1) // 3 + 1\n marginCurrentQuarter = data['margin per quarter'][currentQuarter]\n if currentQuarter == 1:\n marginLastQuarter = round(\n sales.salesPerQuarter(cur, year=today.year - 1, param='margin', account=account, username=username)[4],\n 2)\n else:\n marginLastQuarter = round(data['margin per quarter'][currentQuarter - 1], 2)\n\n try:\n data['margin growth QTD'] = round(100 * marginCurrentQuarter / marginLastQuarter, 2)\n except:\n data['margin growth QTD'] = 0.0\n\n # PIPELINE\n\n data['pipelines'] = sales._pipelines(dbname, cur)\n\n except Exception as e:\n raise\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n # data = {}\n data['sales YTD'] = 0\n data['margin YTD'] = 0\n data['sales QTD'] = 0\n data['margin QTD'] = 0\n data['sales MTD'] = 0\n data['sales per quarter'] = 0\n data['margin per quarter'] = 0\n data['monthly sales'] = 0\n data['monthly sales last year'] = 0\n data['monthly margin'] = 0\n data['monthly margin last year'] = 0\n data['sales last year'] = 0\n data['sales growth YTD'] = 0.0\n data['sales growth month'] = 0.0\n data['margin last year'] = 0.0\n data['margin growth YTD'] = 0.0\n data['margin growth month'] = 0.0\n data['sales growth QTD'] = 0.0\n data['margin growth QTD'] = 0.0\n data['pipelines'] = 0.0\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n return data\n\n\ndef get_insights(dbname, account='all', raw=False, local=False, dbusername='', passwd='', username=''):\n ''' Reads result's database, manipulate the data and returns it.\n '''\n\n dbname = 'data_{}'.format(dbname)\n dbname_results = dbname\n dbname_tasks = dbname.replace('tasks', 'data')\n\n # username = ''\n # passwd = ''\n\n data = dict()\n\n # try:\n # account = account.decode('utf-8')\n # except:\n # pass\n # account = account.encode('latin-1')\n\n try:\n mysql_connection = connection.MySQLConnection(dbname_results)\n con = mysql_connection.connect()\n cur = con.cursor()\n\n if account == 'all':\n data['plans per account'] = kam.plansPerAccount(cur, username=username)\n data['actions per account'] = kam.actionsPerAccount(cur, username=username)\n\n data['activity goals'] = kam.activityGoals(cur, account=account, username=username)\n data['total sales plans'] = kam.totalSalesPlans(cur, account=account, username=username)\n data['total plan 
goals'] = kam.totalPlanGoals(cur, account=account, username=username)\n\n data['actions per day'] = kam.actionsPerDay(cur, account=account, username=username)\n data['actions per month'] = kam.actionsPerMonth(cur, account=account, username=username)\n data['actions per year'] = kam.actionsPerYear(cur, account=account, username=username)\n\n data['goals per quarter'] = kam.goalsPerQuarter(cur, account=account, username=username)\n data['total calls goal'] = kam.totalCallsGoal(cur, account=account, username=username)\n data['total visits goal'] = kam.totalVisitsGoal(cur, account=account, username=username)\n data['total offers goal'] = kam.totalOffersGoal(cur, account=account, username=username)\n\n month = str(datetime.datetime.now().month)\n try:\n data['actions this month'] = data['actions per month'][month]\n except:\n data['actions this month'] = 0\n\n data['actions QTD'] = kam.actionsQTD(cur, account=account, username=username)\n data['actions MTD'] = kam.actionsMTD(cur, account=account, username=username)\n data['actions YTD'] = kam.actionsYTD(cur, account=account, username=username)\n\n today = str(datetime.datetime.now()).split(\" \")[0]\n firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))\n wd = np.busday_count(firstday, today) * 1.0\n data['actions YTD date ratio'] = round(data['actions YTD'] / wd, 2)\n\n except Exception as e:\n print(e)\n # print(\"!!!!>>>>>\")\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n\n if account == 'all':\n data['plans per account'] = \"{}\"\n data['actions per account'] = \"{}\"\n\n data['activity goals'] = 0\n data['total sales plans'] = 0\n data['total plan goals'] = 0\n data['actions per day'] = 0\n data['actions per month'] = 0\n data['actions per year'] = 0\n data['goals per quarter'] = 0\n data['total calls goal'] = 0\n data['total visits goal'] = 0\n data['total offers goal'] = 0\n data['actions this month'] = 0\n data['actions QTD'] = 0\n data['actions MTD'] = 0\n data['actions YTD'] = 0\n data['actions YTD date ratio'] = 0\n\n raise\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n try:\n mysql_connection = connection.MySQLConnection(dbname_tasks)\n con = mysql_connection.connect()\n cur = con.cursor()\n\n today = datetime.datetime.now()\n\n # list of all accounts\n if account == 'all':\n data['accounts'] = kam.accounts(cur, username)\n data['accounts_name'] = kam.accounts_name(cur, username)\n # active accounts and sales in the las 3 months\n data['active accounts'] = kam.activeAccounts(cur, username=username)\n hoy = datetime.datetime.now()\n _tmb = datetime.datetime(year=hoy.year, month=hoy.month, day=hoy.day)\n try:\n data['active accounts growth'] = 100. 
* (len(data['active accounts'].keys()) / len(\n kam.activeAccounts(cur, account, username, today=_tmb).keys()) - 1)\n except:\n data['active accounts growth'] = 0\n\n data['lost accounts'] = [a for a in data['accounts'] if a not in data['active accounts'].keys()]\n try:\n data['actions-accounts ratio'] = round(float(data['actions YTD']) / len(data['accounts']), 2)\n except:\n data['actions-accounts ratio'] = 0.0\n try:\n data['actions-active accounts ratio'] = round(\n float(data['actions YTD']) / len(data['active accounts'].keys()), 2)\n except:\n data['actions-active accounts ratio'] = 0.0\n try:\n data['penetration ratio'] = round(\n 100 * float(len(data['active accounts'].keys())) / len(data['accounts']), 2)\n except:\n data['penetration ratio'] = 0.0\n\n try:\n data['sales YTD'] = round(sales.salesYTD(cur, account=account, username=username), 2)\n except:\n data['sales YTD'] = 0.0\n try:\n data['margin YTD'] = round(sales.salesYTD(cur, param='margin', account=account, username=username), 2)\n except:\n data['margin YTD'] = 0.0\n try:\n data['sales QTD'] = round(sales.salesQTD(cur, year=today.year, account=account, username=username), 2)\n except:\n data['sales QTD'] = 0.0\n try:\n data['margin QTD'] = round(\n sales.salesQTD(cur, param='margin', year=today.year, account=account, username=username), 2)\n except:\n data['margin QTD'] = 0.0\n try:\n data['sales MTD'] = round(sales.salesMTD(cur, account=account, username=username), 2)\n except:\n data['sales MTD'] = 0.0\n data['sales per quarter'] = sales.salesPerQuarter(cur, param='price', year=today.year, account=account,\n username=username)\n data['margin per quarter'] = sales.salesPerQuarter(cur, param='margin', year=today.year, account=account,\n username=username)\n\n data['monthly sales'] = multiparam.monthlyParam(cur, param='price', year=today.year, account=account,\n username=username)\n data['monthly sales last year'] = multiparam.monthlyParam(cur, param='price', year=today.year - 1,\n account=account, username=username)\n\n data['monthly margin'] = multiparam.monthlyParam(cur, param='margin', year=today.year, account=account,\n username=username)\n data['monthly margin last year'] = multiparam.monthlyParam(cur, param='margin', year=today.year - 1,\n account=account, username=username)\n\n s = 0\n for d in data['monthly sales last year']:\n s += d['sales']\n data['sales last year'] = round(s, 2)\n\n try:\n data['sales growth YTD'] = round(100 * data['sales YTD'] / data['sales last year'], 0)\n except:\n data['sales growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['sales growth month'] = round(\n data['monthly sales'][today.month] / data['monthly sales'][today.month - 1], 2)\n except:\n data['sales growth month'] = 0.0\n else:\n for l in data['monthly sales last year']:\n if l['month'] == 12:\n sb = l['sales']\n for l in data['monthly sales']:\n if l['month'] == 12:\n cs = l['sales']\n try:\n data['sales growth month'] = round(cs / sb, 2)\n except:\n data['sales growth month'] = 0.0\n\n s = 0\n for d in data['monthly margin last year']:\n s += d['margin']\n data['margin last year'] = round(s, 2)\n\n try:\n data['margin growth YTD'] = round(100 * data['margin YTD'] / data['margin last year'], 0)\n except:\n data['margin growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['margin growth month'] = round(\n data['monthly margin'][today.month] / data['monthly margin'][today.month - 1], 2)\n except:\n data['margin growth month'] = 0.0\n else:\n for l in data['monthly margin last year']:\n if l['month'] == 
12:\n sb = l['margin']\n for l in data['monthly margin']:\n if l['month'] == 12:\n cs = l['margin']\n try:\n data['margin growth month'] = round(cs / sb, 2)\n except:\n data['margin growth month'] = 0.0\n\n # SALES\n currentQuarter = (today.month - 1) // 3 + 1\n salesCurrentQuarter = data['sales per quarter'][currentQuarter]\n if currentQuarter == 1:\n salesLastQuarter = round(\n sales.salesPerQuarter(cur, year=today.year - 1, param='price', account=account, username=username)[4],\n 2)\n else:\n salesLastQuarter = round(data['sales per quarter'][currentQuarter - 1], 2)\n\n try:\n data['sales growth QTD'] = round(100 * salesCurrentQuarter / salesLastQuarter, 2)\n except:\n data['sales growth QTD'] = 0.0\n\n # MARGIN\n currentQuarter = (today.month - 1) // 3 + 1\n marginCurrentQuarter = data['margin per quarter'][currentQuarter]\n if currentQuarter == 1:\n marginLastQuarter = round(\n sales.salesPerQuarter(cur, year=today.year - 1, param='margin', account=account, username=username)[4],\n 2)\n else:\n marginLastQuarter = round(data['margin per quarter'][currentQuarter - 1], 2)\n\n try:\n data['margin growth QTD'] = round(100 * marginCurrentQuarter / marginLastQuarter, 2)\n except:\n data['margin growth QTD'] = 0.0\n\n # PIPELINE\n\n data['pipelines'] = sales._pipelines(cur)\n\n min_sales = sales.min_values(cur, username=username)\n max_sales = sales.max_values(cur, username=username)\n min_results = results.min_values(cur, username=username)\n max_results = results.max_values(cur, username=username)\n\n data['min_values'] = {}\n data['min_values'].update(min_sales)\n data['min_values'].update(min_results)\n\n data['max_values'] = {}\n data['max_values'].update(max_sales)\n data['max_values'].update(max_results)\n\n print(data['max_values'])\n\n except Exception as e:\n raise\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n # data = {}\n data['sales YTD'] = 0\n data['margin YTD'] = 0\n data['sales QTD'] = 0\n data['margin QTD'] = 0\n data['sales MTD'] = 0\n data['sales per quarter'] = 0\n data['margin per quarter'] = 0\n data['monthly sales'] = 0\n data['monthly sales last year'] = 0\n data['monthly margin'] = 0\n data['monthly margin last year'] = 0\n data['sales last year'] = 0\n data['sales growth YTD'] = 0.0\n data['sales growth month'] = 0.0\n data['margin last year'] = 0.0\n data['margin growth YTD'] = 0.0\n data['margin growth month'] = 0.0\n data['sales growth QTD'] = 0.0\n data['margin growth QTD'] = 0.0\n data['pipelines'] = 0.0\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n return data\n\n\ndef getPerformance(username, account='all', raw=False, local=False, dbusername='', passwd=''):\n ''' Reads result's database, manipulate the data and returns it.\n '''\n\n dbname = 'data_{}'.format(username)\n dbname_results = dbname\n dbname_tasks = dbname.replace('tasks', 'data')\n\n # username = ''\n # passwd = ''\n\n data = dict()\n\n try:\n account = account.decode('utf-8')\n except:\n pass\n account = account.encode('latin-1')\n\n try:\n logging.debug(dbname)\n\n mysql_connection = connection.MySQLConnection(dbname_results)\n con = mysql_connection.connect()\n cur = con.cursor()\n\n if account == 'all':\n data['plans per account'] = kam.plansPerAccount(cur)\n data['actions per account'] = kam.actionsPerAccount(cur)\n\n data['activity goals'] = kam.activityGoals(cur, account=account)\n data['total sales plans'] = kam.totalSalesPlans(cur, account=account)\n data['total plan goals'] = kam.totalPlanGoals(cur, account=account)\n\n 
data['actions per day'] = kam.actionsPerDay(cur, account=account)\n data['actions per month'] = kam.actionsPerMonth(cur, account=account)\n data['actions per year'] = kam.actionsPerYear(cur, account=account)\n\n data['goals per quarter'] = kam.goalsPerQuarter(cur, account=account)\n data['total calls goal'] = kam.totalCallsGoal(cur, account=account)\n data['total visits goal'] = kam.totalVisitsGoal(cur, account=account)\n data['total offers goal'] = kam.totalOffersGoal(cur, account=account)\n\n month = str(datetime.datetime.now().month)\n try:\n data['actions this month'] = data['actions per month'][month]\n except:\n data['actions this month'] = 0\n\n data['actions QTD'] = kam.actionsQTD(cur, account=account)\n data['actions MTD'] = kam.actionsMTD(cur, account=account)\n data['actions YTD'] = kam.actionsYTD(cur, account=account)\n\n today = str(datetime.datetime.now()).split(\" \")[0]\n firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))\n wd = np.busday_count(firstday, today) * 1.0\n data['actions YTD date ratio'] = round(data['actions YTD'] / wd, 2)\n\n except Exception as e:\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n\n if account == 'all':\n data['plans per account'] = \"{}\"\n data['actions per account'] = \"{}\"\n\n data['activity goals'] = 0\n data['total sales plans'] = 0\n data['total plan goals'] = 0\n data['actions per day'] = 0\n data['actions per month'] = 0\n data['actions per year'] = 0\n data['goals per quarter'] = 0\n data['total calls goal'] = 0\n data['total visits goal'] = 0\n data['total offers goal'] = 0\n data['actions this month'] = 0\n data['actions QTD'] = 0\n data['actions MTD'] = 0\n data['actions YTD'] = 0\n data['actions YTD date ratio'] = 0\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n try:\n mysql_connection = connection.MySQLConnection(dbname_tasks)\n con = mysql_connection.connect()\n cur = con.cursor()\n\n today = datetime.datetime.now()\n\n # list of all accounts\n if account == 'all':\n data['accounts'] = kam.accounts(cur)\n # active accounts and sales in the las 3 months\n data['active accounts'] = kam.activeAccounts(cur)\n hoy = datetime.datetime.now()\n _tmb = datetime.datetime(year=hoy.year, month=hoy.month, day=hoy.day)\n try:\n data['active accounts growth'] = 100. 
* (\n len(data['active accounts'].keys()) / len(kam.activeAccounts(cur, today=_tmb).keys()) - 1)\n except:\n data['active accounts growth'] = 0\n\n data['lost accounts'] = [a for a in data['accounts'] if a not in data['active accounts'].keys()]\n try:\n data['actions-accounts ratio'] = round(float(data['actions YTD']) / len(data['accounts']), 2)\n except:\n data['actions-accounts ratio'] = 0.0\n try:\n data['actions-active accounts ratio'] = round(\n float(data['actions YTD']) / len(data['active accounts'].keys()), 2)\n except:\n data['actions-active accounts ratio'] = 0.0\n try:\n data['penetration ratio'] = round(\n 100 * float(len(data['active accounts'].keys())) / len(data['accounts']), 2)\n except:\n data['penetration ratio'] = 0.0\n\n data['sales YTD'] = round(sales.salesYTD(cur, account=account), 2)\n data['margin YTD'] = round(sales.salesYTD(cur, param='margin', account=account), 2)\n data['sales QTD'] = round(sales.salesQTD(cur, year=today.year, account=account), 2)\n data['margin QTD'] = round(sales.salesQTD(cur, param='margin', year=today.year, account=account), 2)\n data['sales MTD'] = round(sales.salesMTD(cur, account=account), 2)\n data['sales per quarter'] = sales.salesPerQuarter(cur, param='price', year=today.year, account=account)\n data['margin per quarter'] = sales.salesPerQuarter(cur, param='margin', year=today.year, account=account)\n\n data['monthly sales'] = multiparam.monthlyParam(cur, param='price', year=today.year, account=account)\n data['monthly sales last year'] = multiparam.monthlyParam(cur, param='price', year=today.year - 1,\n account=account)\n\n data['monthly margin'] = multiparam.monthlyParam(cur, param='margin', year=today.year, account=account)\n data['monthly margin last year'] = multiparam.monthlyParam(cur, param='margin', year=today.year - 1,\n account=account)\n\n s = 0\n for d in data['monthly sales last year']:\n s += d['sales']\n data['sales last year'] = round(s, 2)\n\n try:\n data['sales growth YTD'] = round(100 * data['sales YTD'] / data['sales last year'], 0)\n except:\n data['sales growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['sales growth month'] = round(\n data['monthly sales'][today.month] / data['monthly sales'][today.month - 1], 2)\n except:\n data['sales growth month'] = 0.0\n else:\n for l in data['monthly sales last year']:\n if l['month'] == 12:\n sb = l['sales']\n for l in data['monthly sales']:\n if l['month'] == 12:\n cs = l['sales']\n try:\n data['sales growth month'] = round(cs / sb, 2)\n except:\n data['sales growth month'] = 0.0\n\n s = 0\n for d in data['monthly margin last year']:\n s += d['margin']\n data['margin last year'] = round(s, 2)\n\n try:\n data['margin growth YTD'] = round(100 * data['margin YTD'] / data['margin last year'], 0)\n except:\n data['margin growth YTD'] = 0.0\n\n s = 0\n if today.month > 1:\n try:\n data['margin growth month'] = round(\n data['monthly margin'][today.month] / data['monthly margin'][today.month - 1], 2)\n except:\n data['margin growth month'] = 0.0\n else:\n for l in data['monthly margin last year']:\n if l['month'] == 12:\n sb = l['margin']\n for l in data['monthly margin']:\n if l['month'] == 12:\n cs = l['margin']\n try:\n data['margin growth month'] = round(cs / sb, 2)\n except:\n data['margin growth month'] = 0.0\n\n # SALES\n currentQuarter = (today.month - 1) // 3 + 1\n salesCurrentQuarter = data['sales per quarter'][currentQuarter]\n if currentQuarter == 1:\n salesLastQuarter = round(sales.salesPerQuarter(cur, year=today.year - 1, param='price', 
account=account)[4],\n 2)\n else:\n salesLastQuarter = round(data['sales per quarter'][currentQuarter - 1], 2)\n\n try:\n data['sales growth QTD'] = round(100 * salesCurrentQuarter / salesLastQuarter, 2)\n except:\n data['sales growth QTD'] = 0.0\n\n # MARGIN\n currentQuarter = (today.month - 1) // 3 + 1\n marginCurrentQuarter = data['margin per quarter'][currentQuarter]\n if currentQuarter == 1:\n marginLastQuarter = round(\n sales.salesPerQuarter(cur, year=today.year - 1, param='margin', account=account)[4], 2)\n else:\n marginLastQuarter = round(data['margin per quarter'][currentQuarter - 1], 2)\n\n try:\n data['margin growth QTD'] = round(100 * marginCurrentQuarter / marginLastQuarter, 2)\n except:\n data['margin growth QTD'] = 0.0\n\n # PIPELINE\n\n # data['pipelines'] = sales._pipelines()\n data['pipelines'] = sales.pipelines(cur, username)\n\n except Exception as e:\n # print(\"Error {0}: {1}\".format(e.args[0], e.args[1]))\n # data = {}\n data['sales YTD'] = 0\n data['margin YTD'] = 0\n data['sales QTD'] = 0\n data['margin QTD'] = 0\n data['sales MTD'] = 0\n data['sales per quarter'] = 0\n data['margin per quarter'] = 0\n data['monthly sales'] = 0\n data['monthly sales last year'] = 0\n data['monthly margin'] = 0\n data['monthly margin last year'] = 0\n data['sales last year'] = 0\n data['sales growth YTD'] = 0.0\n data['sales growth month'] = 0.0\n data['margin last year'] = 0.0\n data['margin growth YTD'] = 0.0\n data['margin growth month'] = 0.0\n data['sales growth QTD'] = 0.0\n data['margin growth QTD'] = 0.0\n data['pipelines'] = 0.0\n\n finally:\n try:\n con.close()\n except:\n print('No Db connection possible')\n pass\n\n return data\n\n\nif __name__ == \"__main__\":\n import json\n\n local = True\n\n # Tasks database name\n dbname = ''\n username = 'martinmasip'\n dbname = 'data_{}_data_test_2015_2016_copy_4_xlsx'.format(username)\n dbname = '{}_data_test_2015_2016_copy_4_xlsx'.format(username)\n\n passwd = 'Qymatix!!!'\n dbusername = 'webadmin'\n dbusername = 'webuser'\n\n username = 'martin_masip'\n username = 'qymatix_best'\n dbname = username\n\n # data = getInsights(dbname=dbname, local=local, account='Acrion', username=dbusername, passwd=passwd)\n # print(json.dumps(data))\n\n account = u'Krankenhaus Hetzelstift Neustadt/Weinstrasse'\n account = 'St\\xe4dtisches Klinikum Karlsruhe gGmbH'.decode('latin-1')\n account = 'St\\xe4dtisches Klinikum Karlsruhe gGmbH'\n account = 'Klinikum Wolfsburg'\n account = 'all'\n print(account)\n # dbname = 'coldjet_qy'\n # username = 'robert_gruen'\n dbname = 'qy___test_com'\n\n username = 'ep__mtm___ne_de'\n dbname = 'mtm___ne_de'\n dbname = 'qymatix_de'\n dbname = 'qymatix_best'\n username = 'admin'\n # username = 'chancho_babe__qymatix_best'\n account = 'all'\n\n # data = getInsights(dbname=dbname, local=False, account=account, username=dbusername, passwd=passwd)\n # data = getInsights(dbname=dbname, local=False, account=account, dbusername='webuser', username=username, passwd=passwd)\n data = get_insights(dbname=dbname, local=False, account=account, dbusername='webuser', username=username,\n passwd=passwd)\n print(data['monthly sales'])\n # print(json.dumps(data))\n data = json.dumps(data, encoding='latin-1')\n print(data)\n\n # dbname = 'demo'\n # data = getPerformance(username=dbname, local=local, account='all', dbusername=dbusername, passwd=passwd)\n # print(data)\n # print(json.dumps(data))\n"
]
| [
[
"numpy.busday_count"
]
]
|
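Each insight function in the row above normalizes the year-to-date action count by the number of business days elapsed this year, computed with numpy.busday_count on ISO date strings. A tiny sketch of that 'actions YTD date ratio' computation, with the date-string formatting copied from the snippet and an invented action count:

# Sketch of the 'actions YTD date ratio': actions so far this year divided
# by elapsed weekdays. The action count is invented for illustration.
import datetime
import numpy as np

actions_ytd = 137   # example value
today = str(datetime.datetime.now()).split(" ")[0]                  # 'YYYY-MM-DD'
firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))   # 'YYYY-01-01'

workdays = np.busday_count(firstday, today) * 1.0
# note: on January 1st workdays is 0.0, which the original code does not guard against
print(round(actions_ytd / workdays, 2))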
Menosse/python-programmer | [
"101ae0449478f4d266ce8b24ae67f409268b8ac9"
]
| [
"1. Spyder python files/practice/sierpinski_triagle.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 10:25:58 2020\n\n@author: Fernando\n\nCreate triagles inside triagles by transforming it:\n transformation 1:\n xn+1 = 0.5xn\n yn+1 = 0.5yn\n transformation 2:\n xn+1 = 0.5xn + 0.5\n yn+1 = 0.5yn + 0.5\n transformation 3:\n xn+1 = 0.5xn + 1\n yn+1 = 0.5yn\n\"\"\"\nfrom random import choice\nfrom matplotlib import pyplot as plt\ndef trans_1(p):\n x = p[0]\n y = p[1]\n x1 = 0.5 * x\n y1 = 0.5 * y\n \n return x1, y1\n\ndef trans_2(p):\n x = p[0]\n y = p[1]\n x1 = 0.5 * x + 0.5\n y1 = 0.5 * y + 0.5\n \n return x1, y1\n\ndef trans_3(p):\n x = p[0]\n y = p[1]\n x1 = 0.5 * x + 1\n y1 = 0.5 * y\n \n return x1, y1\n\ntransform = [trans_1,trans_2,trans_3]\na1 = [0]\nb1 = [0]\na,b = 0,0\n\nfor i in range(1000000):\n trans = choice(transform)\n a,b = trans((a,b))\n a1.append(a)\n b1.append(b)\n \nplt.rc('figure', figsize=(16,16))\nplt.plot(a1,b1,'o')\nplt.savefig('my_triagle')"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rc"
]
]
|
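The script in the row above plays the chaos game: it repeatedly applies one of three affine maps chosen at random and scatter-plots the visited points, which converge to a Sierpinski-style triangle. A trimmed-down sketch with fewer iterations; the output file name is illustrative:

# Trimmed chaos-game sketch of the script above: three affine maps chosen
# at random; 50k points are enough to see the triangle pattern emerge.
from random import choice
from matplotlib import pyplot as plt

maps = [
    lambda x, y: (0.5 * x,       0.5 * y),
    lambda x, y: (0.5 * x + 0.5, 0.5 * y + 0.5),
    lambda x, y: (0.5 * x + 1.0, 0.5 * y),
]

xs, ys, x, y = [0.0], [0.0], 0.0, 0.0
for _ in range(50000):
    x, y = choice(maps)(x, y)
    xs.append(x)
    ys.append(y)

plt.rc('figure', figsize=(8, 8))
plt.plot(xs, ys, 'o', markersize=1)
plt.savefig('sierpinski_sketch.png')   # illustrative file name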
bsunnquist/jwql | [
"b7623ef81b4aae3d598265b703bc0166e2c1d551"
]
| [
"jwql/website/apps/jwql/data_containers.py"
]
| [
"\"\"\"Various functions to collect data to be used by the ``views`` of the\n``jwql`` app.\n\nThis module contains several functions that assist in collecting and\nproducing various data to be rendered in ``views.py`` for use by the\n``jwql`` app.\n\nAuthors\n-------\n\n - Lauren Chambers\n - Matthew Bourque\n\nUse\n---\n\n The functions within this module are intended to be imported and\n used by ``views.py``, e.g.:\n\n ::\n from .data_containers import get_proposal_info\n\"\"\"\n\nimport copy\nimport glob\nimport os\nimport re\nimport tempfile\n\nfrom astropy.io import fits\nfrom astropy.time import Time\nfrom django.conf import settings\nimport numpy as np\n\n# astroquery.mast import that depends on value of auth_mast\n# this import has to be made before any other import of astroquery.mast\nfrom jwql.utils.utils import get_config, filename_parser, check_config\ncheck_config('auth_mast')\nauth_mast = get_config()['auth_mast']\nmast_flavour = '.'.join(auth_mast.split('.')[1:])\nfrom astropy import config\nconf = config.get_config('astroquery')\nconf['mast'] = {'server': 'https://{}'.format(mast_flavour)}\nfrom astroquery.mast import Mast\nfrom jwedb.edb_interface import mnemonic_inventory\n\nfrom jwql.database import database_interface as di\nfrom jwql.edb.engineering_database import get_mnemonic, get_mnemonic_info\nfrom jwql.instrument_monitors.miri_monitors.data_trending import dashboard as miri_dash\nfrom jwql.instrument_monitors.nirspec_monitors.data_trending import dashboard as nirspec_dash\nfrom jwql.jwql_monitors import monitor_cron_jobs\nfrom jwql.utils.utils import ensure_dir_exists\nfrom jwql.utils.constants import MONITORS, JWST_INSTRUMENT_NAMES_MIXEDCASE\nfrom jwql.utils.preview_image import PreviewImage\nfrom jwql.utils.credentials import get_mast_token\nfrom .forms import MnemonicSearchForm, MnemonicQueryForm, MnemonicExplorationForm\n\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nFILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')\nPREVIEW_IMAGE_FILESYSTEM = os.path.join(get_config()['jwql_dir'], 'preview_images')\nTHUMBNAIL_FILESYSTEM = os.path.join(get_config()['jwql_dir'], 'thumbnails')\nPACKAGE_DIR = os.path.dirname(__location__.split('website')[0])\nREPO_DIR = os.path.split(PACKAGE_DIR)[0]\n\n\ndef data_trending():\n \"\"\"Container for Miri datatrending dashboard and components\n\n Returns\n -------\n variables : int\n nonsense\n dashboard : list\n A list containing the JavaScript and HTML content for the\n dashboard\n \"\"\"\n dashboard, variables = miri_dash.data_trending_dashboard()\n\n return variables, dashboard\n\n\ndef nirspec_trending():\n \"\"\"Container for Miri datatrending dashboard and components\n\n Returns\n -------\n variables : int\n nonsense\n dashboard : list\n A list containing the JavaScript and HTML content for the\n dashboard\n \"\"\"\n dashboard, variables = nirspec_dash.data_trending_dashboard()\n\n return variables, dashboard\n\n\ndef get_acknowledgements():\n \"\"\"Returns a list of individuals who are acknowledged on the\n ``about`` page.\n\n The list is generated by reading in the contents of the ``jwql``\n ``README`` file. 
In this way, the website will automatically\n update with updates to the ``README`` file.\n\n Returns\n -------\n acknowledgements : list\n A list of individuals to be acknowledged.\n \"\"\"\n\n # Locate README file\n readme_file = os.path.join(REPO_DIR, 'README.md')\n\n # Get contents of the README file\n with open(readme_file, 'r') as f:\n data = f.readlines()\n\n # Find where the acknowledgements start\n for i, line in enumerate(data):\n if 'Acknowledgments' in line:\n index = i\n\n # Parse out the list of individuals\n acknowledgements = data[index + 1:]\n acknowledgements = [item.strip().replace('- ', '').split(' [@')[0].strip()\n for item in acknowledgements]\n\n return acknowledgements\n\n\ndef get_all_proposals():\n \"\"\"Return a list of all proposals that exist in the filesystem.\n\n Returns\n -------\n proposals : list\n A list of proposal numbers for all proposals that exist in the\n filesystem\n \"\"\"\n\n proposals = glob.glob(os.path.join(FILESYSTEM_DIR, '*'))\n proposals = [proposal.split('jw')[-1] for proposal in proposals]\n proposals = [proposal for proposal in proposals if len(proposal) == 5]\n\n return proposals\n\n\ndef get_current_flagged_anomalies(rootname):\n \"\"\"Return a list of currently flagged anomalies for the given\n ``rootname``\n\n Parameters\n ----------\n rootname : str\n The rootname of interest (e.g.\n ``jw86600008001_02101_00001_guider2/``)\n\n Returns\n -------\n current_anomalies : list\n A list of currently flagged anomalies for the given ``rootname``\n (e.g. ``['snowball', 'crosstalk']``)\n \"\"\"\n\n query = di.session.query(di.Anomaly).filter(di.Anomaly.rootname == rootname).order_by(di.Anomaly.flag_date.desc()).limit(1)\n all_records = query.data_frame\n if not all_records.empty:\n current_anomalies = [col for col, val in np.sum(all_records, axis=0).items() if val]\n else:\n current_anomalies = []\n\n return current_anomalies\n\n\ndef get_dashboard_components():\n \"\"\"Build and return dictionaries containing components and html\n needed for the dashboard.\n\n Returns\n -------\n dashboard_components : dict\n A dictionary containing components needed for the dashboard.\n dashboard_html : dict\n A dictionary containing full HTML needed for the dashboard.\n \"\"\"\n\n output_dir = get_config()['outputs']\n name_dict = {'': '',\n 'monitor_mast': 'Database Monitor',\n 'monitor_filesystem': 'Filesystem Monitor'}\n\n # Run the cron job monitor to produce an updated table\n monitor_cron_jobs.status(production_mode=True)\n\n # Build dictionary of Bokeh components from files in the output directory\n dashboard_components = {}\n for dir_name, _, file_list in os.walk(output_dir):\n monitor_name = os.path.basename(dir_name)\n\n # Only continue if the dashboard knows how to build that monitor\n if monitor_name in name_dict.keys():\n formatted_monitor_name = name_dict[monitor_name]\n dashboard_components[formatted_monitor_name] = {}\n for fname in file_list:\n if 'component' in fname:\n full_fname = '{}/{}'.format(monitor_name, fname)\n plot_name = fname.split('_component')[0]\n\n # Generate formatted plot name\n formatted_plot_name = plot_name.title().replace('_', ' ')\n for lowercase, mixed_case in JWST_INSTRUMENT_NAMES_MIXEDCASE.items():\n formatted_plot_name = formatted_plot_name.replace(lowercase.capitalize(),\n mixed_case)\n formatted_plot_name = formatted_plot_name.replace('Jwst', 'JWST')\n formatted_plot_name = formatted_plot_name.replace('Caom', 'CAOM')\n\n # Get the div\n html_file = full_fname.split('.')[0] + '.html'\n with 
open(os.path.join(output_dir, html_file), 'r') as f:\n div = f.read()\n\n # Get the script\n js_file = full_fname.split('.')[0] + '.js'\n with open(os.path.join(output_dir, js_file), 'r') as f:\n script = f.read()\n\n # Save to dictionary\n dashboard_components[formatted_monitor_name][formatted_plot_name] = [div, script]\n\n # Add HTML that cannot be saved as components to the dictionary\n with open(os.path.join(output_dir, 'monitor_cron_jobs', 'cron_status_table.html'), 'r') as f:\n cron_status_table_html = f.read()\n dashboard_html = {}\n dashboard_html['Cron Job Monitor'] = cron_status_table_html\n\n return dashboard_components, dashboard_html\n\n\ndef get_edb_components(request):\n \"\"\"Return dictionary with content needed for the EDB page.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n edb_components : dict\n Dictionary with the required components\n\n \"\"\"\n mnemonic_name_search_result = {}\n mnemonic_query_result = {}\n mnemonic_query_result_plot = None\n mnemonic_exploration_result = None\n\n # If this is a POST request, we need to process the form data\n if request.method == 'POST':\n\n if 'mnemonic_name_search' in request.POST.keys():\n # authenticate with astroquery.mast if necessary\n logged_in = log_into_mast(request)\n\n mnemonic_name_search_form = MnemonicSearchForm(request.POST, logged_in=logged_in,\n prefix='mnemonic_name_search')\n\n if mnemonic_name_search_form.is_valid():\n mnemonic_identifier = mnemonic_name_search_form['search'].value()\n if mnemonic_identifier is not None:\n mnemonic_name_search_result = get_mnemonic_info(mnemonic_identifier)\n\n # create forms for search fields not clicked\n mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')\n mnemonic_exploration_form = MnemonicExplorationForm(prefix='mnemonic_exploration')\n\n elif 'mnemonic_query' in request.POST.keys():\n # authenticate with astroquery.mast if necessary\n logged_in = log_into_mast(request)\n\n mnemonic_query_form = MnemonicQueryForm(request.POST, logged_in=logged_in,\n prefix='mnemonic_query')\n\n # proceed only if entries make sense\n if mnemonic_query_form.is_valid():\n mnemonic_identifier = mnemonic_query_form['search'].value()\n start_time = Time(mnemonic_query_form['start_time'].value(), format='iso')\n end_time = Time(mnemonic_query_form['end_time'].value(), format='iso')\n\n if mnemonic_identifier is not None:\n mnemonic_query_result = get_mnemonic(mnemonic_identifier, start_time, end_time)\n mnemonic_query_result_plot = mnemonic_query_result.bokeh_plot()\n\n # generate table download in web app\n result_table = mnemonic_query_result.data\n\n # save file locally to be available for download\n static_dir = os.path.join(settings.BASE_DIR, 'static')\n ensure_dir_exists(static_dir)\n file_name_root = 'mnemonic_query_result_table'\n file_for_download = '{}.csv'.format(file_name_root)\n path_for_download = os.path.join(static_dir, file_for_download)\n\n # add meta data to saved table\n comments = []\n comments.append('DMS EDB query of {}:'.format(mnemonic_identifier))\n for key, value in mnemonic_query_result.info.items():\n comments.append('{} = {}'.format(key, str(value)))\n result_table.meta['comments'] = comments\n comments.append(' ')\n comments.append('Start time {}'.format(start_time.isot))\n comments.append('End time {}'.format(end_time.isot))\n comments.append('Number of rows {}'.format(len(result_table)))\n comments.append(' ')\n result_table.write(path_for_download, format='ascii.fixed_width',\n 
overwrite=True, delimiter=',', bookend=False)\n mnemonic_query_result.file_for_download = file_for_download\n\n # create forms for search fields not clicked\n mnemonic_name_search_form = MnemonicSearchForm(prefix='mnemonic_name_search')\n mnemonic_exploration_form = MnemonicExplorationForm(prefix='mnemonic_exploration')\n\n elif 'mnemonic_exploration' in request.POST.keys():\n mnemonic_exploration_form = MnemonicExplorationForm(request.POST,\n prefix='mnemonic_exploration')\n if mnemonic_exploration_form.is_valid():\n mnemonic_exploration_result, meta = mnemonic_inventory()\n\n # loop over filled fields and implement simple AND logic\n for field in mnemonic_exploration_form.fields:\n field_value = mnemonic_exploration_form[field].value()\n if field_value != '':\n column_name = mnemonic_exploration_form[field].label\n\n # matching indices in table (case-insensitive)\n index = [\n i for i, item in enumerate(mnemonic_exploration_result[column_name]) if\n re.search(field_value, item, re.IGNORECASE)\n ]\n mnemonic_exploration_result = mnemonic_exploration_result[index]\n\n mnemonic_exploration_result.n_rows = len(mnemonic_exploration_result)\n\n # generate tables for display and download in web app\n display_table = copy.deepcopy(mnemonic_exploration_result)\n\n # temporary html file,\n # see http://docs.astropy.org/en/stable/_modules/astropy/table/\n tmpdir = tempfile.mkdtemp()\n file_name_root = 'mnemonic_exploration_result_table'\n path_for_html = os.path.join(tmpdir, '{}.html'.format(file_name_root))\n with open(path_for_html, 'w') as tmp:\n display_table.write(tmp, format='jsviewer')\n mnemonic_exploration_result.html_file_content = open(path_for_html, 'r').read()\n\n # pass on meta data to have access to total number of mnemonics\n mnemonic_exploration_result.meta = meta\n\n # save file locally to be available for download\n static_dir = os.path.join(settings.BASE_DIR, 'static')\n ensure_dir_exists(static_dir)\n file_for_download = '{}.csv'.format(file_name_root)\n path_for_download = os.path.join(static_dir, file_for_download)\n display_table.write(path_for_download, format='ascii.fixed_width',\n overwrite=True, delimiter=',', bookend=False)\n mnemonic_exploration_result.file_for_download = file_for_download\n\n if mnemonic_exploration_result.n_rows == 0:\n mnemonic_exploration_result = 'empty'\n\n # create forms for search fields not clicked\n mnemonic_name_search_form = MnemonicSearchForm(prefix='mnemonic_name_search')\n mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')\n\n else:\n mnemonic_name_search_form = MnemonicSearchForm(prefix='mnemonic_name_search')\n mnemonic_query_form = MnemonicQueryForm(prefix='mnemonic_query')\n mnemonic_exploration_form = MnemonicExplorationForm(prefix='mnemonic_exploration')\n\n edb_components = {'mnemonic_query_form': mnemonic_query_form,\n 'mnemonic_query_result': mnemonic_query_result,\n 'mnemonic_query_result_plot': mnemonic_query_result_plot,\n 'mnemonic_name_search_form': mnemonic_name_search_form,\n 'mnemonic_name_search_result': mnemonic_name_search_result,\n 'mnemonic_exploration_form': mnemonic_exploration_form,\n 'mnemonic_exploration_result': mnemonic_exploration_result}\n\n return edb_components\n\n\ndef get_expstart(rootname):\n \"\"\"Return the exposure start time (``expstart``) for the given\n group of files.\n\n The ``expstart`` is gathered from a query to the\n ``astroquery.mast`` service.\n\n Parameters\n ----------\n rootname : str\n The rootname of the observation of interest (e.g.\n 
``jw86700006001_02101_00006_guider1``).\n\n Returns\n -------\n expstart : float\n The exposure start time of the observation (in MJD).\n \"\"\"\n\n return 5000.00\n\n\ndef get_filenames_by_instrument(instrument):\n \"\"\"Returns a list of paths to files that match the given\n ``instrument``.\n\n Parameters\n ----------\n instrument : str\n The instrument of interest (e.g. `FGS`).\n\n Returns\n -------\n filepaths : list\n A list of full paths to the files that match the given\n instrument.\n \"\"\"\n\n # Query files from MAST database\n # filepaths, filenames = DatabaseConnection('MAST', instrument=instrument).\\\n # get_files_for_instrument(instrument)\n\n # Find all of the matching files in filesytem\n # (TEMPORARY WHILE THE MAST STUFF IS BEING WORKED OUT)\n instrument_match = {'FGS': 'guider',\n 'MIRI': 'mir',\n 'NIRCam': 'nrc',\n 'NIRISS': 'nis',\n 'NIRSpec': 'nrs'}\n search_filepath = os.path.join(FILESYSTEM_DIR, '*', '*.fits')\n filepaths = [f for f in glob.glob(search_filepath) if instrument_match[instrument] in f]\n\n return filepaths\n\n\ndef get_filenames_by_proposal(proposal):\n \"\"\"Return a list of filenames that are available in the filesystem\n for the given ``proposal``.\n\n Parameters\n ----------\n proposal : str\n The one- to five-digit proposal number (e.g. ``88600``).\n\n Returns\n -------\n filenames : list\n A list of filenames associated with the given ``proposal``.\n \"\"\"\n\n proposal_string = '{:05d}'.format(int(proposal))\n filenames = sorted(glob.glob(os.path.join(\n FILESYSTEM_DIR, 'jw{}'.format(proposal_string), '*')))\n filenames = [os.path.basename(filename) for filename in filenames]\n\n return filenames\n\n\ndef get_filenames_by_rootname(rootname):\n \"\"\"Return a list of filenames available in the filesystem that\n are part of the given ``rootname``.\n\n Parameters\n ----------\n rootname : str\n The rootname of interest (e.g. 
``jw86600008001_02101_00007_guider2``).\n\n Returns\n -------\n filenames : list\n A list of filenames associated with the given ``rootname``.\n \"\"\"\n\n proposal = rootname.split('_')[0].split('jw')[-1][0:5]\n filenames = sorted(glob.glob(os.path.join(\n FILESYSTEM_DIR,\n 'jw{}'.format(proposal),\n '{}*'.format(rootname))))\n filenames = [os.path.basename(filename) for filename in filenames]\n\n return filenames\n\n\ndef get_header_info(file):\n \"\"\"Return the header information for a given ``file``.\n\n Parameters\n ----------\n file : str\n The name of the file of interest.\n\n Returns\n -------\n header : str\n The primary FITS header for the given ``file``.\n \"\"\"\n\n dirname = file[:7]\n fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, file)\n header = fits.getheader(fits_filepath, ext=0).tostring(sep='\\n')\n\n return header\n\n\ndef get_image_info(file_root, rewrite):\n \"\"\"Build and return a dictionary containing information for a given\n ``file_root``.\n\n Parameters\n ----------\n file_root : str\n The rootname of the file of interest.\n rewrite : bool\n ``True`` if the corresponding JPEG needs to be rewritten,\n ``False`` if not.\n\n Returns\n -------\n image_info : dict\n A dictionary containing various information for the given\n ``file_root``.\n \"\"\"\n\n # Initialize dictionary to store information\n image_info = {}\n image_info['all_jpegs'] = []\n image_info['suffixes'] = []\n image_info['num_ints'] = {}\n\n preview_dir = os.path.join(get_config()['jwql_dir'], 'preview_images')\n\n # Find all of the matching files\n dirname = file_root[:7]\n search_filepath = os.path.join(FILESYSTEM_DIR, dirname, file_root + '*.fits')\n image_info['all_files'] = glob.glob(search_filepath)\n\n for file in image_info['all_files']:\n\n # Get suffix information\n suffix = os.path.basename(file).split('_')[4].split('.')[0]\n image_info['suffixes'].append(suffix)\n\n # Determine JPEG file location\n jpg_dir = os.path.join(preview_dir, dirname)\n jpg_filename = os.path.basename(os.path.splitext(file)[0] + '_integ0.jpg')\n jpg_filepath = os.path.join(jpg_dir, jpg_filename)\n\n # Check that a jpg does not already exist. 
If it does (and rewrite=False),\n # just call the existing jpg file\n if os.path.exists(jpg_filepath) and not rewrite:\n pass\n\n # If it doesn't, make it using the preview_image module\n else:\n if not os.path.exists(jpg_dir):\n os.makedirs(jpg_dir)\n im = PreviewImage(file, 'SCI')\n im.output_directory = jpg_dir\n im.make_image()\n\n # Record how many integrations there are per filetype\n search_jpgs = os.path.join(preview_dir, dirname,\n file_root + '_{}_integ*.jpg'.format(suffix))\n num_jpgs = len(glob.glob(search_jpgs))\n image_info['num_ints'][suffix] = num_jpgs\n\n image_info['all_jpegs'].append(jpg_filepath)\n\n return image_info\n\n\ndef get_instrument_proposals(instrument):\n \"\"\"Return a list of proposals for the given instrument\n\n Parameters\n ----------\n instrument : str\n Name of the JWST instrument\n\n Returns\n -------\n proposals : list\n List of proposals for the given instrument\n \"\"\"\n\n service = \"Mast.Jwst.Filtered.{}\".format(instrument)\n params = {\"columns\": \"program\",\n \"filters\": []}\n response = Mast.service_request_async(service, params)\n results = response[0].json()['data']\n proposals = list(set(result['program'] for result in results))\n\n return proposals\n\n\ndef get_preview_images_by_instrument(inst):\n \"\"\"Return a list of preview images available in the filesystem for\n the given instrument.\n\n Parameters\n ----------\n inst : str\n The instrument of interest (e.g. ``NIRCam``).\n\n Returns\n -------\n preview_images : list\n A list of preview images available in the filesystem for the\n given instrument.\n \"\"\"\n\n # Make sure the instrument is of the proper format (e.g. \"Nircam\")\n instrument = inst[0].upper() + inst[1:].lower()\n\n # Query MAST for all rootnames for the instrument\n service = \"Mast.Jwst.Filtered.{}\".format(instrument)\n params = {\"columns\": \"filename\",\n \"filters\": []}\n response = Mast.service_request_async(service, params)\n results = response[0].json()['data']\n\n # Parse the results to get the rootnames\n filenames = [result['filename'].split('.')[0] for result in results]\n\n # Get list of all preview_images\n preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, '*', '*.jpg'))\n\n # Get subset of preview images that match the filenames\n preview_images = [os.path.basename(item) for item in preview_images if\n os.path.basename(item).split('_integ')[0] in filenames]\n\n # Return only\n\n return preview_images\n\n\ndef get_preview_images_by_proposal(proposal):\n \"\"\"Return a list of preview images available in the filesystem for\n the given ``proposal``.\n\n Parameters\n ----------\n proposal : str\n The one- to five-digit proposal number (e.g. ``88600``).\n\n Returns\n -------\n preview_images : list\n A list of preview images available in the filesystem for the\n given ``proposal``.\n \"\"\"\n\n proposal_string = '{:05d}'.format(int(proposal))\n preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal_string), '*'))\n preview_images = [os.path.basename(preview_image) for preview_image in preview_images]\n\n return preview_images\n\n\ndef get_preview_images_by_rootname(rootname):\n \"\"\"Return a list of preview images available in the filesystem for\n the given ``rootname``.\n\n Parameters\n ----------\n rootname : str\n The rootname of interest (e.g. 
``jw86600008001_02101_00007_guider2``).\n\n Returns\n -------\n preview_images : list\n A list of preview images available in the filesystem for the\n given ``rootname``.\n \"\"\"\n\n proposal = rootname.split('_')[0].split('jw')[-1][0:5]\n preview_images = sorted(glob.glob(os.path.join(\n PREVIEW_IMAGE_FILESYSTEM,\n 'jw{}'.format(proposal),\n '{}*'.format(rootname))))\n preview_images = [os.path.basename(preview_image) for preview_image in preview_images]\n\n return preview_images\n\n\ndef get_proposal_info(filepaths):\n \"\"\"Builds and returns a dictionary containing various information\n about the proposal(s) that correspond to the given ``filepaths``.\n\n The information returned contains such things as the number of\n proposals, the paths to the corresponding thumbnails, and the total\n number of files.\n\n Parameters\n ----------\n filepaths : list\n A list of full paths to files of interest.\n\n Returns\n -------\n proposal_info : dict\n A dictionary containing various information about the\n proposal(s) and files corresponding to the given ``filepaths``.\n \"\"\"\n\n proposals = list(set([f.split('/')[-1][2:7] for f in filepaths]))\n thumbnail_dir = os.path.join(get_config()['jwql_dir'], 'thumbnails')\n thumbnail_paths = []\n num_files = []\n for proposal in proposals:\n thumbnail_search_filepath = os.path.join(\n thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal)\n )\n thumbnail = glob.glob(thumbnail_search_filepath)\n if len(thumbnail) > 0:\n thumbnail = thumbnail[0]\n thumbnail = '/'.join(thumbnail.split('/')[-2:])\n thumbnail_paths.append(thumbnail)\n\n fits_search_filepath = os.path.join(\n FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal)\n )\n num_files.append(len(glob.glob(fits_search_filepath)))\n\n # Put the various information into a dictionary of results\n proposal_info = {}\n proposal_info['num_proposals'] = len(proposals)\n proposal_info['proposals'] = proposals\n proposal_info['thumbnail_paths'] = thumbnail_paths\n proposal_info['num_files'] = num_files\n\n return proposal_info\n\n\ndef get_thumbnails_by_instrument(inst):\n \"\"\"Return a list of thumbnails available in the filesystem for the\n given instrument.\n\n Parameters\n ----------\n inst : str\n The instrument of interest (e.g. ``NIRCam``).\n\n Returns\n -------\n preview_images : list\n A list of thumbnails available in the filesystem for the\n given instrument.\n \"\"\"\n\n # Make sure the instrument is of the proper format (e.g. \"Nircam\")\n instrument = inst[0].upper() + inst[1:].lower()\n\n # Query MAST for all rootnames for the instrument\n service = \"Mast.Jwst.Filtered.{}\".format(instrument)\n params = {\"columns\": \"filename\",\n \"filters\": []}\n response = Mast.service_request_async(service, params)\n results = response[0].json()['data']\n\n # Parse the results to get the rootnames\n filenames = [result['filename'].split('.')[0] for result in results]\n\n # Get list of all thumbnails\n thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, '*', '*.thumb'))\n\n # Get subset of preview images that match the filenames\n thumbnails = [os.path.basename(item) for item in thumbnails if\n os.path.basename(item).split('_integ')[0] in filenames]\n\n return thumbnails\n\n\ndef get_thumbnails_by_proposal(proposal):\n \"\"\"Return a list of thumbnails available in the filesystem for the\n given ``proposal``.\n\n Parameters\n ----------\n proposal : str\n The one- to five-digit proposal number (e.g. 
``88600``).\n\n Returns\n -------\n thumbnails : list\n A list of thumbnails available in the filesystem for the given\n ``proposal``.\n \"\"\"\n\n proposal_string = '{:05d}'.format(int(proposal))\n thumbnails = glob.glob(os.path.join(THUMBNAIL_FILESYSTEM, 'jw{}'.format(proposal_string), '*'))\n thumbnails = [os.path.basename(thumbnail) for thumbnail in thumbnails]\n\n return thumbnails\n\n\ndef get_thumbnails_by_rootname(rootname):\n \"\"\"Return a list of preview images available in the filesystem for\n the given ``rootname``.\n\n Parameters\n ----------\n rootname : str\n The rootname of interest (e.g. ``jw86600008001_02101_00007_guider2``).\n\n Returns\n -------\n thumbnails : list\n A list of preview images available in the filesystem for the\n given ``rootname``.\n \"\"\"\n\n proposal = rootname.split('_')[0].split('jw')[-1][0:5]\n thumbnails = sorted(glob.glob(os.path.join(\n THUMBNAIL_FILESYSTEM,\n 'jw{}'.format(proposal),\n '{}*'.format(rootname))))\n\n thumbnails = [os.path.basename(thumbnail) for thumbnail in thumbnails]\n\n return thumbnails\n\n\ndef log_into_mast(request):\n \"\"\"Login via astroquery.mast if user authenticated in web app.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n \"\"\"\n if Mast.authenticated():\n return True\n\n # get the MAST access token if present\n access_token = str(get_mast_token(request))\n\n # authenticate with astroquery.mast if necessary\n if access_token != 'None':\n Mast.login(token=access_token)\n return Mast.authenticated()\n else:\n return False\n\n\ndef random_404_page():\n \"\"\"Randomly select one of the various 404 templates for JWQL\n\n Returns\n -------\n random_template : str\n Filename of the selected template\n \"\"\"\n templates = ['404_space.html', '404_spacecat.html']\n choose_page = np.random.choice(len(templates))\n random_template = templates[choose_page]\n\n return random_template\n\n\ndef thumbnails(inst, proposal=None):\n \"\"\"Generate a page showing thumbnail images corresponding to\n activities, from a given ``proposal``\n\n Parameters\n ----------\n inst : str\n Name of JWST instrument\n proposal : str (optional)\n Number of APT proposal to filter\n\n Returns\n -------\n dict_to_render : dict\n Dictionary of parameters for the thumbnails\n \"\"\"\n\n filepaths = get_filenames_by_instrument(inst)\n\n # JUST FOR DEVELOPMENT\n # Split files into \"archived\" and \"unlooked\"\n if proposal is not None:\n page_type = 'archive'\n else:\n page_type = 'unlooked'\n filepaths = split_files(filepaths, page_type)\n\n # Determine file ID (everything except suffix)\n # e.g. jw00327001001_02101_00002_nrca1\n full_ids = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filepaths])\n\n # If the proposal is specified (i.e. 
if the page being loaded is\n    # an archive page), only collect data for given proposal\n    if proposal is not None:\n        proposal_string = '{:05d}'.format(int(proposal))\n        full_ids = [f for f in full_ids if f[2:7] == proposal_string]\n\n    detectors = []\n    proposals = []\n    for i, file_id in enumerate(full_ids):\n        for file in filepaths:\n            if '_'.join(file.split('/')[-1].split('_')[:-1]) == file_id:\n\n                # Parse filename to get program_id\n                try:\n                    program_id = filename_parser(file)['program_id']\n                    detector = filename_parser(file)['detector']\n                except ValueError:\n                    # Temporary workaround for noncompliant files in filesystem\n                    program_id = file_id[2:7]\n                    detector = file_id[26:]\n\n                # Add parameters to sort by\n                if detector not in detectors and not detector.startswith('f'):\n                    detectors.append(detector)\n                if program_id not in proposals:\n                    proposals.append(program_id)\n\n    # Extract information for sorting with dropdown menus\n    # (Don't include the proposal as a sorting parameter if the\n    # proposal has already been specified)\n    if proposal is not None:\n        dropdown_menus = {'detector': detectors}\n    else:\n        dropdown_menus = {'detector': detectors,\n                          'proposal': proposals}\n\n    dict_to_render = {'inst': inst,\n                      'tools': MONITORS,\n                      'dropdown_menus': dropdown_menus,\n                      'prop': proposal}\n\n    return dict_to_render\n\n\ndef thumbnails_ajax(inst, proposal=None):\n    \"\"\"Generate a page that provides data necessary to render the\n    ``thumbnails`` template.\n\n    Parameters\n    ----------\n    inst : str\n        Name of JWST instrument\n    proposal : str (optional)\n        Number of APT proposal to filter\n\n    Returns\n    -------\n    data_dict : dict\n        Dictionary of data needed for the ``thumbnails`` template\n    \"\"\"\n\n    # Get the available files for the instrument\n    filepaths = get_filenames_by_instrument(inst)\n\n    # Get set of unique rootnames\n    rootnames = set(['_'.join(f.split('/')[-1].split('_')[:-1]) for f in filepaths])\n\n    # If the proposal is specified (i.e. 
if the page being loaded is\n # an archive page), only collect data for given proposal\n if proposal is not None:\n proposal_string = '{:05d}'.format(int(proposal))\n rootnames = [rootname for rootname in rootnames if rootname[2:7] == proposal_string]\n\n # Initialize dictionary that will contain all needed data\n data_dict = {}\n data_dict['inst'] = inst\n data_dict['file_data'] = {}\n\n # Gather data for each rootname\n for rootname in rootnames:\n\n # Parse filename\n try:\n filename_dict = filename_parser(rootname)\n except ValueError:\n # Temporary workaround for noncompliant files in filesystem\n filename_dict = {'activity': rootname[17:19],\n 'detector': rootname[26:],\n 'exposure_id': rootname[20:25],\n 'observation': rootname[7:10],\n 'parallel_seq_id': rootname[16],\n 'program_id': rootname[2:7],\n 'visit': rootname[10:13],\n 'visit_group': rootname[14:16]}\n\n # Get list of available filenames\n available_files = get_filenames_by_rootname(rootname)\n\n # Add data to dictionary\n data_dict['file_data'][rootname] = {}\n data_dict['file_data'][rootname]['filename_dict'] = filename_dict\n data_dict['file_data'][rootname]['available_files'] = available_files\n data_dict['file_data'][rootname]['expstart'] = get_expstart(rootname)\n data_dict['file_data'][rootname]['suffixes'] = [filename_parser(filename)['suffix'] for\n filename in available_files]\n\n # Extract information for sorting with dropdown menus\n # (Don't include the proposal as a sorting parameter if the\n # proposal has already been specified)\n detectors = [data_dict['file_data'][rootname]['filename_dict']['detector'] for\n rootname in list(data_dict['file_data'].keys())]\n proposals = [data_dict['file_data'][rootname]['filename_dict']['program_id'] for\n rootname in list(data_dict['file_data'].keys())]\n if proposal is not None:\n dropdown_menus = {'detector': detectors}\n else:\n dropdown_menus = {'detector': detectors,\n 'proposal': proposals}\n\n data_dict['tools'] = MONITORS\n data_dict['dropdown_menus'] = dropdown_menus\n data_dict['prop'] = proposal\n\n return data_dict\n"
]
| [
[
"numpy.sum"
]
]
|
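A minimal sketch (with an invented rootname) of the proposal/rootname string handling that the jwql helpers above repeat in get_filenames_by_rootname, get_preview_images_by_rootname and the proposal-based lookups; the values and the print call are purely illustrative:

    # Hypothetical example values; the slicing and zero-padding mirror the helpers above.
    rootname = 'jw86600008001_02101_00007_guider2'
    proposal = rootname.split('_')[0].split('jw')[-1][0:5]   # -> '86600'
    proposal_string = '{:05d}'.format(int(proposal))         # zero-padded -> '86600'
    print(proposal, proposal_string)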
Luo1916/frds | [
"b348a3ce5ee0b7e4bf85d6f114e1feffa431beda"
]
| [
"frds/data/utils.py"
]
| [
"\"\"\"Some utility functions\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\ndef filter_funda(funda: np.recarray) -> np.recarray:\r\n \"\"\"Standard filter on the `wrds.comp.funda` dataset\r\n\r\n Parameters\r\n ----------\r\n funda : np.recarray\r\n `wrds.comp.funda` dataset\r\n\r\n Returns\r\n -------\r\n np.recarray\r\n Filtered dataset\r\n \"\"\"\r\n return funda[\r\n np.in1d(funda.datafmt, (\"STD\"))\r\n & np.in1d(funda.indfmt, (\"INDL\"))\r\n & np.in1d(funda.popsrc, (\"D\"))\r\n & np.in1d(funda.consol, (\"C\"))\r\n ]\r\n"
]
| [
[
"numpy.in1d"
]
]
|
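A minimal sketch of how the np.in1d-based filter in frds/data/utils.py behaves; the tiny record array below is invented for illustration and stands in for the real wrds.comp.funda data:

    import numpy as np

    # Toy stand-in for the funda recarray (hypothetical rows).
    funda = np.rec.fromrecords(
        [('STD', 'INDL', 'D', 'C'), ('SUMM_STD', 'INDL', 'D', 'C')],
        names=['datafmt', 'indfmt', 'popsrc', 'consol'],
    )
    mask = (
        np.in1d(funda.datafmt, ('STD'))
        & np.in1d(funda.indfmt, ('INDL'))
        & np.in1d(funda.popsrc, ('D'))
        & np.in1d(funda.consol, ('C'))
    )
    print(funda[mask])  # keeps only the standard-format, consolidated row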
zhawhjw/Tool_Functions | [
"c462a1466fd2540211f72d234cb59c50602c8f6d"
]
| [
"Tool_Functions/twitter-sentiment-analysis/Stanford_Tokenize.py"
]
| [
"import json\nimport pandas as pd\nimport string\nimport nltk\nfrom nltk.tokenize.stanford import StanfordTokenizer\nfrom nltk.tag import StanfordPOSTagger\nfrom nltk import word_tokenize\nimport os\n\n# stop words to remove from text\nnltk.download(\"stopwords\")\n# also removing @ in this case since Stanford Tokenizer tokenizes them\nuseless_ones = nltk.corpus.stopwords.words(\"english\") + list(string.punctuation) + ['@']\n# workaround for adding environment variable for tagger\njar = 'stanford-postagger.jar'\nmodel = 'english-bidirectional-distsim.tagger'\npos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')\n# set java path in environment\njava_path = 'C:/Program Files/Java/jdk-13/bin/java.exe'\nos.environ['JAVAHOME'] = java_path\n\ndef tokenizeTweets(tweetList):\n retTweetList = []\n for tweet in tweetList:\n wordlist = [word for word in pos_tagger.tag(word_tokenize(tweet)) if word not in useless_ones]\n retTweetList.append(wordlist)\n return retTweetList\n\ndef tokenizeTweet(tweet):\n wordlist = [word[0] for word in pos_tagger.tag(word_tokenize(tweet)) if word not in useless_ones]\n return wordlist\n\ndef main(twtInfo:object):\n clean_data_tweets = pd.read_json(twtInfo, orient=\"records\")\n tweets = clean_data_tweets[\"text\"]\n data_id = clean_data_tweets[\"id\"]\n data_tc_tweets = []\n for tweet in tweets:\n data_tc_tweets.append(tokenizeTweet(tweet))\n ret = []\n for i in range(len(data_tc_tweets)):\n ret.append({})\n ret[i][\"text\"] = data_tc_tweets[i]\n ret[i][\"id\"] = data_id[i]\n return pd.Series(ret).to_json(orient=\"records\")\n"
]
| [
[
"pandas.read_json",
"pandas.Series"
]
]
|
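A minimal sketch of the records-oriented JSON round trip that main() in Stanford_Tokenize.py relies on, with str.split standing in for the Stanford tagger pipeline; the sample tweets are invented:

    from io import StringIO

    import pandas as pd

    tweets_json = '[{"text": "hello world", "id": 1}, {"text": "second tweet", "id": 2}]'
    df = pd.read_json(StringIO(tweets_json), orient='records')
    tokenized = [text.split() for text in df['text']]   # stand-in for tokenizeTweet()
    records = [{'text': tokens, 'id': int(i)} for tokens, i in zip(tokenized, df['id'])]
    print(pd.Series(records).to_json(orient='records'))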
mdjabc/hyperparameter_hunter | [
"bfbd1faf63272a62e6f971d7e9a0487d71aea8f6"
]
| [
"examples/xgboost_examples/regression.py"
]
| [
"from hyperparameter_hunter import Environment, CVExperiment\nfrom hyperparameter_hunter import GBRT, Real, Integer, Categorical\nimport pandas as pd\nfrom sklearn.datasets import load_diabetes\nfrom xgboost import XGBRegressor\n\n#################### Format DataFrame ####################\ndata = load_diabetes()\ntrain_df = pd.DataFrame(data=data.data, columns=data.feature_names)\ntrain_df[\"progression\"] = data.target\n\n#################### Set Up Environment ####################\nenv = Environment(\n train_dataset=train_df,\n results_path=\"HyperparameterHunterAssets\",\n target_column=\"progression\",\n metrics=[\"mean_absolute_error\"],\n cv_type=\"KFold\",\n cv_params=dict(n_splits=12, shuffle=True, random_state=32),\n runs=2,\n)\n\n# Now that HyperparameterHunter has an active `Environment`, we can do two things:\n#################### 1. Perform Experiments ####################\nexperiment = CVExperiment(\n model_initializer=XGBRegressor,\n model_init_params=dict(max_depth=4, n_estimators=400, subsample=0.5),\n model_extra_params=dict(fit=dict(eval_metric=\"mae\")),\n)\n\n# And/or...\n#################### 2. Hyperparameter Optimization ####################\noptimizer = GBRT(iterations=20, random_state=32)\noptimizer.set_experiment_guidelines(\n model_initializer=XGBRegressor,\n model_init_params=dict(\n max_depth=Integer(2, 20),\n n_estimators=Integer(100, 900),\n learning_rate=Real(0.0001, 0.5),\n subsample=0.5,\n booster=Categorical([\"gbtree\", \"gblinear\"]),\n ),\n model_extra_params=dict(fit=dict(eval_metric=Categorical([\"rmse\", \"mae\"]))),\n)\noptimizer.go()\n\n# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search\n# space/guidelines set for `optimizer`.\n\n# Then, when optimization is started, it automatically learns from `experiment`'s results\n# - without any extra work for us!\n"
]
| [
[
"pandas.DataFrame",
"sklearn.datasets.load_diabetes"
]
]
|
elyqg/ross | [
"1806842c9cf13482c6054e214a9918e613cb380d"
]
| [
"ross/rotor_assembly.py"
]
| [
"# fmt: off\nimport os\nimport shutil\nimport warnings\nfrom collections import Counter, namedtuple\nfrom collections.abc import Iterable\nfrom copy import copy, deepcopy\nfrom itertools import chain, cycle\nfrom pathlib import Path\n\nimport bokeh.palettes as bp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\nimport scipy.linalg as la\nimport scipy.signal as signal\nimport scipy.sparse.linalg as las\nimport toml\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.glyphs import Text\nfrom bokeh.plotting import figure, output_file\nfrom cycler import cycler\n\nfrom ross.bearing_seal_element import BearingElement, SealElement\nfrom ross.disk_element import DiskElement\nfrom ross.materials import steel\nfrom ross.results import (CampbellResults, ConvergenceResults,\n ForcedResponseResults, FrequencyResponseResults,\n ModalResults, OrbitResponseResults, StaticResults,\n SummaryResults, TimeResponseResults)\nfrom ross.shaft_element import ShaftElement\nfrom ross.utils import convert\n\n# fmt: on\n\n__all__ = [\"Rotor\", \"CoAxialRotor\", \"rotor_example\", \"coaxrotor_example\"]\n\n# set style and colors\nplt.style.use(\"seaborn-white\")\nplt.style.use(\n {\n \"lines.linewidth\": 2.5,\n \"axes.grid\": True,\n \"axes.linewidth\": 0.1,\n \"grid.color\": \".9\",\n \"grid.linestyle\": \"--\",\n \"legend.frameon\": True,\n \"legend.framealpha\": 0.2,\n }\n)\n\n# set bokeh palette of colors\nbokeh_colors = bp.RdGy[11]\n\n_orig_rc_params = mpl.rcParams.copy()\n\nseaborn_colors = [\"#4c72b0\", \"#55a868\", \"#c44e52\", \"#8172b2\", \"#ccb974\", \"#64b5cd\"]\n\n\nclass Rotor(object):\n r\"\"\"A rotor object.\n\n This class will create a rotor with the shaft,\n disk, bearing and seal elements provided.\n\n Parameters\n ----------\n shaft_elements : list\n List with the shaft elements\n disk_elements : list\n List with the disk elements\n bearing_elements : list\n List with the bearing elements\n point_mass_elements: list\n List with the point mass elements\n sparse : bool, optional\n If sparse, eigenvalues will be calculated with arpack.\n Default is True.\n n_eigen : int, optional\n Number of eigenvalues calculated by arpack.\n Default is 12.\n tag : str\n A tag for the rotor\n\n Returns\n -------\n A rotor object.\n\n Attributes\n ----------\n evalues : array\n Rotor's eigenvalues.\n evectors : array\n Rotor's eigenvectors.\n wn : array\n Rotor's natural frequencies in rad/s.\n wd : array\n Rotor's damped natural frequencies in rad/s.\n\n Examples\n --------\n >>> # Rotor without damping with 2 shaft elements 1 disk and 2 bearings\n >>> import ross as rs\n >>> steel = rs.materials.steel\n >>> z = 0\n >>> le = 0.25\n >>> i_d = 0\n >>> o_d = 0.05\n >>> tim0 = rs.ShaftElement(le, i_d, o_d,\n ... material=steel,\n ... shear_effects=True,\n ... rotary_inertia=True,\n ... gyroscopic=True)\n >>> tim1 = rs.ShaftElement(le, i_d, o_d,\n ... material=steel,\n ... shear_effects=True,\n ... rotary_inertia=True,\n ... 
gyroscopic=True)\n >>> shaft_elm = [tim0, tim1]\n >>> disk0 = rs.DiskElement.from_geometry(1, steel, 0.07, 0.05, 0.28)\n >>> stf = 1e6\n >>> bearing0 = rs.BearingElement(0, kxx=stf, cxx=0)\n >>> bearing1 = rs.BearingElement(2, kxx=stf, cxx=0)\n >>> rotor = rs.Rotor(shaft_elm, [disk0], [bearing0, bearing1])\n >>> modal = rotor.run_modal(speed=0)\n >>> modal.wd[0] # doctest: +ELLIPSIS\n 215.3707...\n \"\"\"\n\n def __init__(\n self,\n shaft_elements,\n disk_elements=None,\n bearing_elements=None,\n point_mass_elements=None,\n sparse=True,\n n_eigen=12,\n min_w=None,\n max_w=None,\n rated_w=None,\n tag=None,\n ):\n\n self.parameters = {\n \"sparse\": True,\n \"n_eigen\": n_eigen,\n \"min_w\": min_w,\n \"max_w\": max_w,\n \"rated_w\": rated_w,\n }\n if tag is None:\n self.tag = \"Rotor 0\"\n\n ####################################################\n # Config attributes\n ####################################################\n\n self.sparse = sparse\n self.n_eigen = n_eigen\n # operational speeds\n self.min_w = min_w\n self.max_w = max_w\n self.rated_w = rated_w\n\n ####################################################\n\n # flatten shaft_elements\n def flatten(l):\n for el in l:\n if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):\n yield from flatten(el)\n else:\n yield el\n\n # flatten and make a copy for shaft elements to avoid altering\n # attributes for elements that might be used in different rotors\n # e.g. altering shaft_element.n\n shaft_elements = [copy(el) for el in flatten(shaft_elements)]\n\n # set n for each shaft element\n for i, sh in enumerate(shaft_elements):\n if sh.n is None:\n sh.n = i\n if sh.tag is None:\n sh.tag = sh.__class__.__name__ + \" \" + str(i)\n\n if disk_elements is None:\n disk_elements = []\n if bearing_elements is None:\n bearing_elements = []\n if point_mass_elements is None:\n point_mass_elements = []\n\n for i, disk in enumerate(disk_elements):\n if disk.tag is None:\n disk.tag = \"Disk \" + str(i)\n\n for i, brg in enumerate(bearing_elements):\n if brg.__class__.__name__ == \"BearingElement\" and brg.tag is None:\n brg.tag = \"Bearing \" + str(i)\n if brg.__class__.__name__ == \"SealElement\" and brg.tag is None:\n brg.tag = \"Seal \" + str(i)\n\n for i, p_mass in enumerate(point_mass_elements):\n if p_mass.tag is None:\n p_mass.tag = \"Point Mass \" + str(i)\n\n self.shaft_elements = sorted(shaft_elements, key=lambda el: el.n)\n self.bearing_elements = sorted(bearing_elements, key=lambda el: el.n)\n self.disk_elements = disk_elements\n self.point_mass_elements = point_mass_elements\n self.elements = [\n el\n for el in flatten(\n [\n self.shaft_elements,\n self.disk_elements,\n self.bearing_elements,\n self.point_mass_elements,\n ]\n )\n ]\n\n ####################################################\n # Rotor summary\n ####################################################\n columns = [\n \"type\",\n \"n\",\n \"n_link\",\n \"L\",\n \"node_pos\",\n \"node_pos_r\",\n \"idl\",\n \"odl\",\n \"idr\",\n \"odr\",\n \"i_d\",\n \"o_d\",\n \"beam_cg\",\n \"axial_cg_pos\",\n \"y_pos\",\n \"material\",\n \"rho\",\n \"volume\",\n \"m\",\n \"tag\",\n ]\n\n df_shaft = pd.DataFrame([el.summary() for el in self.shaft_elements])\n df_disks = pd.DataFrame([el.summary() for el in self.disk_elements])\n df_bearings = pd.DataFrame(\n [\n el.summary()\n for el in self.bearing_elements\n if (el.__class__.__name__ == \"BearingElement\")\n ]\n )\n df_seals = pd.DataFrame(\n [\n el.summary()\n for el in self.bearing_elements\n if (el.__class__.__name__ == \"SealElement\")\n 
]\n )\n df_point_mass = pd.DataFrame([el.summary() for el in self.point_mass_elements])\n\n nodes_pos_l = np.zeros(len(df_shaft.n_l))\n nodes_pos_r = np.zeros(len(df_shaft.n_l))\n axial_cg_pos = np.zeros(len(df_shaft.n_l))\n\n for i, sh in enumerate(self.shaft_elements):\n if i == 0:\n nodes_pos_r[i] = nodes_pos_r[i] + df_shaft.loc[i, \"L\"]\n axial_cg_pos[i] = sh.beam_cg + nodes_pos_l[i]\n sh.axial_cg_pos = axial_cg_pos[i]\n continue\n if df_shaft.loc[i, \"n_l\"] == df_shaft.loc[i - 1, \"n_l\"]:\n nodes_pos_l[i] = nodes_pos_l[i - 1]\n nodes_pos_r[i] = nodes_pos_r[i - 1]\n else:\n nodes_pos_l[i] = nodes_pos_r[i - 1]\n nodes_pos_r[i] = nodes_pos_l[i] + df_shaft.loc[i, \"L\"]\n axial_cg_pos[i] = sh.beam_cg + nodes_pos_l[i]\n sh.axial_cg_pos = axial_cg_pos[i]\n\n df_shaft[\"nodes_pos_l\"] = nodes_pos_l\n df_shaft[\"nodes_pos_r\"] = nodes_pos_r\n df_shaft[\"axial_cg_pos\"] = axial_cg_pos\n\n df = pd.concat(\n [df_shaft, df_disks, df_bearings, df_point_mass, df_seals], sort=True\n )\n df = df.sort_values(by=\"n_l\")\n df = df.reset_index(drop=True)\n df[\"shaft_number\"] = np.zeros(len(df))\n\n df_shaft[\"shaft_number\"] = np.zeros(len(df_shaft))\n df_disks[\"shaft_number\"] = np.zeros(len(df_disks))\n df_bearings[\"shaft_number\"] = np.zeros(len(df_bearings))\n df_seals[\"shaft_number\"] = np.zeros(len(df_seals))\n df_point_mass[\"shaft_number\"] = np.zeros(len(df_point_mass))\n\n self.df_disks = df_disks\n self.df_bearings = df_bearings\n self.df_shaft = df_shaft\n self.df_point_mass = df_point_mass\n self.df_seals = df_seals\n\n # check consistence for disks and bearings location\n if len(df_point_mass) > 0:\n max_loc_point_mass = df_point_mass.n.max()\n else:\n max_loc_point_mass = 0\n max_location = max(df_shaft.n_r.max(), max_loc_point_mass)\n if df.n_l.max() > max_location:\n raise ValueError(\"Trying to set disk or bearing outside shaft\")\n\n # nodes axial position and diameter\n nodes_pos = list(df_shaft.groupby(\"n_l\")[\"nodes_pos_l\"].max())\n nodes_pos.append(df_shaft[\"nodes_pos_r\"].iloc[-1])\n self.nodes_pos = nodes_pos\n\n nodes_i_d = list(df_shaft.groupby(\"n_l\")[\"i_d\"].min())\n nodes_i_d.append(df_shaft[\"i_d\"].iloc[-1])\n self.nodes_i_d = nodes_i_d\n\n nodes_o_d = list(df_shaft.groupby(\"n_l\")[\"o_d\"].max())\n nodes_o_d.append(df_shaft[\"o_d\"].iloc[-1])\n self.nodes_o_d = nodes_o_d\n\n shaft_elements_length = list(df_shaft.groupby(\"n_l\")[\"L\"].min())\n self.shaft_elements_length = shaft_elements_length\n\n self.nodes = list(range(len(self.nodes_pos)))\n self.L = nodes_pos[-1]\n\n # rotor mass can also be calculated with self.M()[::4, ::4].sum()\n self.m_disks = np.sum([disk.m for disk in self.disk_elements])\n self.m_shaft = np.sum([sh_el.m for sh_el in self.shaft_elements])\n self.m = self.m_disks + self.m_shaft\n\n # rotor center of mass and total inertia\n CG_sh = np.sum(\n [(sh.m * sh.axial_cg_pos) / self.m for sh in self.shaft_elements]\n )\n CG_dsk = np.sum(\n [disk.m * nodes_pos[disk.n] / self.m for disk in self.disk_elements]\n )\n self.CG = CG_sh + CG_dsk\n\n Ip_sh = np.sum([sh.Im for sh in self.shaft_elements])\n Ip_dsk = np.sum([disk.Ip for disk in self.disk_elements])\n self.Ip = Ip_sh + Ip_dsk\n\n # values for evalues and evectors will be calculated by self.run_modal\n self.evalues = None\n self.evectors = None\n self.wn = None\n self.wd = None\n self.lti = None\n\n self._v0 = None # used to call eigs\n\n # number of dofs\n self.ndof = (\n 4 * max([el.n for el in shaft_elements])\n + 8\n + 2 * len([el for el in point_mass_elements])\n )\n\n # global 
indexes for dofs\n n_last = self.shaft_elements[-1].n\n for elm in self.elements:\n dof_mapping = elm.dof_mapping()\n global_dof_mapping = {}\n for k, v in dof_mapping.items():\n dof_letter, dof_number = k.split(\"_\")\n global_dof_mapping[dof_letter + \"_\" + str(int(dof_number) + elm.n)] = v\n dof_tuple = namedtuple(\"GlobalIndex\", global_dof_mapping)\n\n if elm.n <= n_last + 1:\n for k, v in global_dof_mapping.items():\n global_dof_mapping[k] = 4 * elm.n + v\n else:\n for k, v in global_dof_mapping.items():\n global_dof_mapping[k] = 2 * n_last + 2 * elm.n + 4 + v\n\n if hasattr(elm, \"n_link\") and elm.n_link is not None:\n if elm.n_link <= n_last + 1:\n global_dof_mapping[f\"x_{elm.n_link}\"] = 4 * elm.n_link\n global_dof_mapping[f\"y_{elm.n_link}\"] = 4 * elm.n_link + 1\n else:\n global_dof_mapping[f\"x_{elm.n_link}\"] = (\n 2 * n_last + 2 * elm.n_link + 4\n )\n global_dof_mapping[f\"y_{elm.n_link}\"] = (\n 2 * n_last + 2 * elm.n_link + 5\n )\n\n dof_tuple = namedtuple(\"GlobalIndex\", global_dof_mapping)\n elm.dof_global_index = dof_tuple(**global_dof_mapping)\n df.at[\n df.loc[df.tag == elm.tag].index[0], \"dof_global_index\"\n ] = elm.dof_global_index\n\n # values for static analysis will be calculated by def static\n self.Vx = None\n self.Bm = None\n self.disp_y = None\n\n # define positions for disks\n for disk in disk_elements:\n z_pos = nodes_pos[disk.n]\n y_pos = nodes_o_d[disk.n]\n df.loc[df.tag == disk.tag, \"nodes_pos_l\"] = z_pos\n df.loc[df.tag == disk.tag, \"nodes_pos_r\"] = z_pos\n df.loc[df.tag == disk.tag, \"y_pos\"] = y_pos\n\n # define positions for bearings\n # check if there are bearings without location\n bearings_no_zloc = {\n b\n for b in bearing_elements\n if pd.isna(df.loc[df.tag == b.tag, \"nodes_pos_l\"]).all()\n }\n # cycle while there are bearings without a z location\n for b in cycle(self.bearing_elements):\n if bearings_no_zloc:\n if b in bearings_no_zloc:\n # first check if b.n is on list, if not, check for n_link\n node_l = df.loc[(df.n_l == b.n) & (df.tag != b.tag), \"nodes_pos_l\"]\n node_r = df.loc[(df.n_r == b.n) & (df.tag != b.tag), \"nodes_pos_r\"]\n if len(node_l) == 0 and len(node_r) == 0:\n node_l = df.loc[\n (df.n_link == b.n) & (df.tag != b.tag), \"nodes_pos_l\"\n ]\n node_r = node_l\n if len(node_l):\n df.loc[df.tag == b.tag, \"nodes_pos_l\"] = node_l.values[0]\n df.loc[df.tag == b.tag, \"nodes_pos_r\"] = node_l.values[0]\n bearings_no_zloc.discard(b)\n elif len(node_r):\n df.loc[df.tag == b.tag, \"nodes_pos_l\"] = node_r.values[0]\n df.loc[df.tag == b.tag, \"nodes_pos_r\"] = node_r.values[0]\n bearings_no_zloc.discard(b)\n else:\n break\n\n dfb = df[df.type == \"BearingElement\"]\n z_positions = [pos for pos in dfb[\"nodes_pos_l\"]]\n z_positions = list(dict.fromkeys(z_positions))\n for z_pos in z_positions:\n dfb_z_pos = dfb[dfb.nodes_pos_l == z_pos]\n dfb_z_pos = dfb_z_pos.sort_values(by=\"n_l\")\n if z_pos == df_shaft[\"nodes_pos_l\"].iloc[0]:\n y_pos = (\n max(\n df_shaft[\"odl\"][\n df_shaft.n_l == int(dfb_z_pos.iloc[0][\"n_l\"])\n ].values\n )\n / 2\n )\n elif z_pos == df_shaft[\"nodes_pos_r\"].iloc[-1]:\n y_pos = (\n max(\n df_shaft[\"odr\"][\n df_shaft.n_r == int(dfb_z_pos.iloc[0][\"n_r\"])\n ].values\n )\n / 2\n )\n else:\n y_pos = (\n max(\n [\n max(\n df_shaft[\"odl\"][\n df_shaft._n == int(dfb_z_pos.iloc[0][\"n_l\"])\n ].values\n ),\n max(\n df_shaft[\"odr\"][\n df_shaft._n == int(dfb_z_pos.iloc[0][\"n_l\"]) - 1\n ].values\n ),\n ]\n )\n / 2\n )\n mean_od = np.mean(nodes_o_d)\n scale_size = dfb[\"scale_factor\"] * mean_od\n 
y_pos_sup = y_pos + 2 * scale_size\n\n for t in dfb_z_pos.tag:\n df.loc[df.tag == t, \"y_pos\"] = y_pos\n df.loc[df.tag == t, \"y_pos_sup\"] = y_pos_sup\n y_pos += 2 * mean_od * df[\"scale_factor\"][df.tag == t].values[0]\n y_pos_sup += 2 * mean_od * df[\"scale_factor\"][df.tag == t].values[0]\n\n # define position for point mass elements\n dfb = df[df.type == \"BearingElement\"]\n for p in point_mass_elements:\n z_pos = dfb[dfb.n_l == p.n][\"nodes_pos_l\"].values[0]\n y_pos = dfb[dfb.n_l == p.n][\"y_pos\"].values[0]\n df.loc[df.tag == p.tag, \"nodes_pos_l\"] = z_pos\n df.loc[df.tag == p.tag, \"nodes_pos_r\"] = z_pos\n df.loc[df.tag == p.tag, \"y_pos\"] = y_pos\n\n self.df = df\n\n def __eq__(self, other):\n \"\"\"\n Equality method for comparasions\n\n Parameters\n ----------\n other : obj\n parameter for comparasion\n\n Returns\n -------\n True if other is equal to the reference parameter.\n False if not.\n \"\"\"\n if self.elements == other.elements and self.parameters == other.parameters:\n return True\n else:\n return False\n\n def run_modal(self, speed):\n \"\"\"\n Method to calculate eigenvalues and eigvectors for a given rotor system\n This method is automatically called when a rotor is instantiated.\n\n Parameters\n ----------\n\n Returns\n -------\n evalues : array\n Eigenvalues array\n evectors : array\n Eigenvectors array\n wn : array\n Undamped natural frequencies array\n wd : array\n Damped natural frequencies array\n log_dec : array\n Logarithmic decrement array\n\n Example\n -------\n >>> rotor = rotor_example()\n >>> modal = rotor.run_modal(speed=0)\n >>> modal.wn[:2]\n array([91.79655318, 96.28899977])\n >>> modal.wd[:2]\n array([91.79655318, 96.28899977])\n >>> modal.plot_mode3D(0) # doctest: +ELLIPSIS\n (<Figure ...\n \"\"\"\n evalues, evectors = self._eigen(speed)\n wn_len = len(evalues) // 2\n wn = (np.absolute(evalues))[:wn_len]\n wd = (np.imag(evalues))[:wn_len]\n damping_ratio = (-np.real(evalues) / np.absolute(evalues))[:wn_len]\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n log_dec = 2 * np.pi * damping_ratio / np.sqrt(1 - damping_ratio ** 2)\n lti = self._lti(speed)\n modal_results = ModalResults(\n speed,\n evalues,\n evectors,\n wn,\n wd,\n damping_ratio,\n log_dec,\n lti,\n self.ndof,\n self.nodes,\n self.nodes_pos,\n self.shaft_elements_length,\n )\n\n return modal_results\n\n def convergence(self, n_eigval=0, err_max=1e-02):\n \"\"\"\n Function to analyze the eigenvalues convergence through the number of\n shaft elements. Every new run doubles the number os shaft elements.\n\n Parameters\n ----------\n n_eigval : int\n The nth eigenvalue which the convergence analysis will run.\n Default is 0 (the first eigenvalue).\n err_max : float\n Maximum allowable convergence error.\n Default is 1e-02\n\n Returns\n -------\n Lists containing the information about:\n The number or elements in each run;\n The relative error calculated in each run;\n The natural frequency calculated in each run.\n\n Example\n -------\n >>> import ross as rs\n >>> i_d = 0\n >>> o_d = 0.05\n >>> n = 6\n >>> L = [0.25 for _ in range(n)]\n ...\n >>> shaft_elem = [rs.ShaftElement(l, i_d, o_d, material=steel,\n ... 
shear_effects=True, rotary_inertia=True, gyroscopic=True) for l in L]\n >>> disk0 = DiskElement.from_geometry(2, steel, 0.07, 0.05, 0.28)\n >>> disk1 = DiskElement.from_geometry(4, steel, 0.07, 0.05, 0.35)\n >>> bearing0 = BearingElement(0, kxx=1e6, kyy=8e5, cxx=2e3)\n >>> bearing1 = BearingElement(6, kxx=1e6, kyy=8e5, cxx=2e3)\n >>> rotor0 = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n >>> len(rotor0.shaft_elements)\n 6\n >>> convergence = rotor0.convergence(n_eigval=0, err_max=1e-08)\n >>> len(rotor0.shaft_elements)\n 96\n \"\"\"\n el_num = np.array([len(self.shaft_elements)])\n eigv_arr = np.array([])\n error_arr = np.array([0])\n\n modal = self.run_modal(speed=0)\n eigv_arr = np.append(eigv_arr, modal.wn[n_eigval])\n\n # this value is up to start the loop while\n error = 1.0e10\n nel_r = 2\n\n while error > err_max:\n shaft_elem = []\n disk_elem = []\n brgs_elem = []\n\n for shaft in self.shaft_elements:\n le = shaft.L / nel_r\n odl = shaft.odl\n odr = shaft.odr\n idl = shaft.idl\n idr = shaft.idr\n\n # loop to double the number of element\n for j in range(nel_r):\n odr = ((nel_r - j - 1) * odl + (j + 1) * odr) / nel_r\n idr = ((nel_r - j - 1) * idl + (j + 1) * idr) / nel_r\n odl = ((nel_r - j) * odl + j * odr) / nel_r\n idl = ((nel_r - j) * idl + j * idr) / nel_r\n shaft_elem.append(\n ShaftElement(\n L=le,\n idl=idl,\n odl=odl,\n idr=idr,\n odr=odr,\n material=shaft.material,\n shear_effects=shaft.shear_effects,\n rotary_inertia=shaft.rotary_inertia,\n gyroscopic=shaft.gyroscopic,\n )\n )\n\n for DiskEl in self.disk_elements:\n aux_DiskEl = deepcopy(DiskEl)\n aux_DiskEl.n = nel_r * DiskEl.n\n disk_elem.append(aux_DiskEl)\n\n for Brg_SealEl in self.bearing_elements:\n aux_Brg_SealEl = deepcopy(Brg_SealEl)\n aux_Brg_SealEl.n = nel_r * Brg_SealEl.n\n brgs_elem.append(aux_Brg_SealEl)\n\n aux_rotor = Rotor(shaft_elem, disk_elem, brgs_elem, n_eigen=self.n_eigen)\n aux_modal = aux_rotor.run_modal(speed=0)\n\n eigv_arr = np.append(eigv_arr, aux_modal.wn[n_eigval])\n el_num = np.append(el_num, len(shaft_elem))\n\n error = min(eigv_arr[-1], eigv_arr[-2]) / max(eigv_arr[-1], eigv_arr[-2])\n error = 1 - error\n\n error_arr = np.append(error_arr, 100 * error)\n nel_r *= 2\n\n self.__dict__ = aux_rotor.__dict__\n self.error_arr = error_arr\n\n results = ConvergenceResults(el_num[1:], eigv_arr[1:], error_arr[1:])\n\n return results\n\n def M(self):\n r\"\"\"Mass matrix for an instance of a rotor.\n\n Returns\n -------\n Mass matrix for the rotor.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> rotor.M()[:4, :4]\n array([[ 1.42050794, 0. , 0. , 0.04931719],\n [ 0. , 1.42050794, -0.04931719, 0. ],\n [ 0. , -0.04931719, 0.00231392, 0. ],\n [ 0.04931719, 0. , 0. 
, 0.00231392]])\n \"\"\"\n M0 = np.zeros((self.ndof, self.ndof))\n\n for elm in self.elements:\n dofs = elm.dof_global_index\n M0[np.ix_(dofs, dofs)] += elm.M()\n\n return M0\n\n def K(self, frequency):\n \"\"\"Stiffness matrix for an instance of a rotor.\n\n Parameters\n ----------\n frequency : float, optional\n Excitation frequency.\n\n Returns\n -------\n Stiffness matrix for the rotor.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> np.round(rotor.K(0)[:4, :4]/1e6)\n array([[47., 0., 0., 6.],\n [ 0., 46., -6., 0.],\n [ 0., -6., 1., 0.],\n [ 6., 0., 0., 1.]])\n \"\"\"\n K0 = np.zeros((self.ndof, self.ndof))\n\n for elm in self.elements:\n dofs = elm.dof_global_index\n try:\n K0[np.ix_(dofs, dofs)] += elm.K(frequency)\n except TypeError:\n K0[np.ix_(dofs, dofs)] += elm.K()\n\n return K0\n\n def C(self, frequency):\n \"\"\"Damping matrix for an instance of a rotor.\n\n Parameters\n ----------\n frequency : float\n Excitation frequency.\n\n Returns\n -------\n Damping matrix for the rotor.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> rotor.C(0)[:4, :4]\n array([[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]])\n \"\"\"\n C0 = np.zeros((self.ndof, self.ndof))\n\n for elm in self.elements:\n dofs = elm.dof_global_index\n\n try:\n C0[np.ix_(dofs, dofs)] += elm.C(frequency)\n except TypeError:\n C0[np.ix_(dofs, dofs)] += elm.C()\n\n return C0\n\n def G(self):\n \"\"\"Gyroscopic matrix for an instance of a rotor.\n\n Returns\n -------\n Gyroscopic matrix for the rotor.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> rotor.G()[:4, :4]\n array([[ 0. , 0.01943344, -0.00022681, 0. ],\n [-0.01943344, 0. , 0. , -0.00022681],\n [ 0.00022681, 0. , 0. , 0.0001524 ],\n [ 0. , 0.00022681, -0.0001524 , 0. ]])\n \"\"\"\n G0 = np.zeros((self.ndof, self.ndof))\n\n for elm in self.elements:\n dofs = elm.dof_global_index\n G0[np.ix_(dofs, dofs)] += elm.G()\n\n return G0\n\n def A(self, speed=0, frequency=None):\n \"\"\"State space matrix for an instance of a rotor.\n\n Parameters\n ----------\n speed: float, optional\n Rotor speed.\n Default is 0.\n frequency : float, optional\n Excitation frequency. Default is rotor speed.\n\n Returns\n -------\n State space matrix for the rotor.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> np.round(rotor.A()[50:56, :2])\n array([[ 0., 10927.],\n [-10924., -0.],\n [ -174., 0.],\n [ -0., -174.],\n [ -0., 10723.],\n [-10719., -0.]])\n \"\"\"\n if frequency is None:\n frequency = speed\n\n Z = np.zeros((self.ndof, self.ndof))\n I = np.eye(self.ndof)\n\n # fmt: off\n A = np.vstack(\n [np.hstack([Z, I]),\n np.hstack([la.solve(-self.M(), self.K(frequency)), la.solve(-self.M(), (self.C(frequency) + self.G() * speed))])])\n # fmt: on\n\n return A\n\n @staticmethod\n def _index(eigenvalues):\n r\"\"\"Function used to generate an index that will sort\n eigenvalues and eigenvectors based on the imaginary (wd)\n part of the eigenvalues. 
Positive eigenvalues will be\n positioned at the first half of the array.\n\n Parameters\n ----------\n eigenvalues: array\n Array with the eigenvalues.\n\n Returns\n -------\n idx:\n An array with indices that will sort the\n eigenvalues and eigenvectors.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> evalues, evectors = rotor._eigen(0, sorted_=True)\n >>> idx = rotor._index(evalues)\n >>> idx[:6] # doctest: +ELLIPSIS\n array([0, 1, 2, 3, 4, ...\n \"\"\"\n # avoid float point errors when sorting\n evals_truncated = np.around(eigenvalues, decimals=10)\n a = np.imag(evals_truncated) # First column\n b = np.absolute(evals_truncated) # Second column\n ind = np.lexsort((b, a)) # Sort by imag (wd), then by absolute (wn)\n # Positive eigenvalues first\n positive = [i for i in ind[len(a) // 2 :]]\n negative = [i for i in ind[: len(a) // 2]]\n\n idx = np.array([positive, negative]).flatten()\n\n return idx\n\n def _eigen(self, speed, frequency=None, sorted_=True, A=None):\n r\"\"\"This method will return the eigenvalues and eigenvectors of the\n state space matrix A, sorted by the index method which considers\n the imaginary part (wd) of the eigenvalues for sorting.\n To avoid sorting use sorted_=False\n\n Parameters\n ----------\n speed: float\n Rotor speed.\n frequency: float\n Excitation frequency.\n sorted_: bool, optional\n Sort considering the imaginary part (wd)\n Default is True\n A: np.array, optional\n Matrix for which eig will be calculated.\n Defaul is the rotor A matrix.\n\n\n Returns\n -------\n evalues: array\n An array with the eigenvalues\n evectors array\n An array with the eigenvectors\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> evalues, evectors = rotor._eigen(0)\n >>> evalues[0].imag # doctest: +ELLIPSIS\n 91.796...\n \"\"\"\n if A is None:\n A = self.A(speed=speed, frequency=frequency)\n\n if self.sparse is True:\n try:\n evalues, evectors = las.eigs(\n A,\n k=self.n_eigen,\n sigma=0,\n ncv=2 * self.n_eigen,\n which=\"LM\",\n v0=self._v0,\n )\n # store v0 as a linear combination of the previously\n # calculated eigenvectors to use in the next call to eigs\n self._v0 = np.real(sum(evectors.T))\n except las.ArpackError:\n evalues, evectors = la.eig(A)\n else:\n evalues, evectors = la.eig(A)\n\n if sorted_ is False:\n return evalues, evectors\n\n idx = self._index(evalues)\n\n return evalues[idx], evectors[:, idx]\n\n def _lti(self, speed, frequency=None):\n \"\"\"Continuous-time linear time invariant system.\n\n This method is used to create a Continuous-time linear\n time invariant system for the mdof system.\n From this system we can obtain poles, impulse response,\n generate a bode, etc.\n\n Parameters\n ----------\n speed: float\n Rotor speed.\n frequency: float, optional\n Excitation frequency.\n Default is rotor speed.\n\n Returns\n -------\n sys : StateSpaceContinuous\n Space State Continuos with A, B, C and D matrices\n\n Example\n -------\n >>> rotor = rotor_example()\n >>> A = rotor._lti(speed=0).A\n >>> B = rotor._lti(speed=0).B\n >>> C = rotor._lti(speed=0).C\n >>> D = rotor._lti(speed=0).D\n \"\"\"\n Z = np.zeros((self.ndof, self.ndof))\n I = np.eye(self.ndof)\n\n # x' = Ax + Bu\n B2 = I\n if frequency is None:\n frequency = speed\n A = self.A(speed=speed, frequency=frequency)\n # fmt: off\n B = np.vstack([Z,\n la.solve(self.M(), B2)])\n # fmt: on\n\n # y = Cx + Du\n # Observation matrices\n Cd = I\n Cv = Z\n Ca = Z\n\n # fmt: off\n C = np.hstack((Cd - Ca @ la.solve(self.M(), self.K(frequency)), Cv - Ca @ la.solve(self.M(), 
self.C(frequency))))\n # fmt: on\n D = Ca @ la.solve(self.M(), B2)\n\n sys = signal.lti(A, B, C, D)\n\n return sys\n\n def transfer_matrix(self, speed=None, frequency=None, modes=None):\n \"\"\"\n Calculates the fer matrix for the frequency response function (FRF)\n\n Paramenters\n -----------\n frequency : float, optional\n Excitation frequency. Default is rotor speed.\n speed : float, optional\n Rotating speed. Default is rotor speed (frequency).\n modes : list, optional\n List with modes used to calculate the matrix.\n (all modes will be used if a list is not given).\n\n Returns\n -------\n H : matrix\n System transfer matrix\n\n Example\n -------\n >>> rotor = rotor_example()\n >>> speed = 100.0\n >>> H = rotor.transfer_matrix(speed=speed)\n \"\"\"\n modal = self.run_modal(speed=speed)\n B = modal.lti.B\n C = modal.lti.C\n D = modal.lti.D\n\n # calculate eigenvalues and eigenvectors using la.eig to get\n # left and right eigenvectors.\n\n evals, psi, = la.eig(self.A(speed, frequency))\n\n psi_inv = la.inv(psi)\n\n if modes is not None:\n n = self.ndof # n dof -> number of modes\n m = len(modes) # -> number of desired modes\n # idx to get each evalue/evector and its conjugate\n idx = np.zeros((2 * m), int)\n idx[0:m] = modes # modes\n idx[m:] = range(2 * n)[-m:] # conjugates (see how evalues are ordered)\n\n evals = evals[np.ix_(idx)]\n psi = psi[np.ix_(range(2 * n), idx)]\n psi_inv = psi_inv[np.ix_(idx, range(2 * n))]\n\n diag = np.diag([1 / (1j * speed - lam) for lam in evals])\n\n H = C @ psi @ diag @ psi_inv @ B + D\n\n return H\n\n def run_freq_response(self, speed_range=None, modes=None):\n \"\"\"Frequency response for a mdof system.\n\n This method returns the frequency response for a mdof system\n given a range of frequencies and the modes that will be used.\n\n Parameters\n ----------\n speed_range : array, optional\n Array with the desired range of frequencies (the default\n is 0 to 1.5 x highest damped natural frequency.\n modes : list, optional\n Modes that will be used to calculate the frequency response\n (all modes will be used if a list is not given).\n\n Returns\n -------\n results : array\n Array with the frequencies, magnitude (dB) of the frequency\n response for each pair input/output, and\n phase of the frequency response for each pair input/output..\n It will be returned if plot=False.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> response = rotor.run_freq_response(speed_range=speed)\n >>> response.magnitude # doctest: +ELLIPSIS\n array([[[1.00000000e-06, 1.00261725e-06, 1.01076952e-06, ...\n \"\"\"\n if speed_range is None:\n modal = self.run_modal(0)\n speed_range = np.linspace(0, max(modal.evalues.imag) * 1.5, 1000)\n\n freq_resp = np.empty((self.ndof, self.ndof, len(speed_range)), dtype=np.complex)\n\n for i, speed in enumerate(speed_range):\n H = self.transfer_matrix(speed=speed, modes=modes)\n freq_resp[..., i] = H\n\n results = FrequencyResponseResults(\n freq_resp=freq_resp,\n speed_range=speed_range,\n magnitude=abs(freq_resp),\n phase=np.angle(freq_resp),\n )\n\n return results\n\n def run_forced_response(self, force=None, speed_range=None, modes=None):\n \"\"\"Unbalanced response for a mdof system.\n\n This method returns the unbalanced response for a mdof system\n given magnitude and phase of the unbalance, the node where it's\n applied and a frequency range.\n\n Parameters\n ----------\n force : list\n Unbalance force in each degree of freedom for each value in omega\n speed_range : list, float\n 
Array with the desired range of frequencies\n modes : list, optional\n Modes that will be used to calculate the frequency response\n (all modes will be used if a list is not given).\n\n Returns\n -------\n force_resp : array\n Array with the force response for each node for each frequency\n speed_range : array\n Array with the frequencies\n magnitude : array\n Magnitude (dB) of the frequency response for node for each frequency\n phase : array\n Phase of the frequency response for node for each frequency\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> force = rotor._unbalance_force(3, 10.0, 0.0, speed)\n >>> resp = rotor.run_forced_response(force=force, speed_range=speed)\n >>> resp.magnitude # doctest: +ELLIPSIS\n array([[0.00000000e+00, 5.06073311e-04, 2.10044826e-03, ...\n \"\"\"\n freq_resp = self.run_freq_response(speed_range=speed_range, modes=modes)\n\n forced_resp = np.zeros(\n (self.ndof, len(freq_resp.speed_range)), dtype=np.complex\n )\n\n for i in range(len(freq_resp.speed_range)):\n forced_resp[:, i] = freq_resp.freq_resp[..., i] @ force[..., i]\n\n forced_resp = ForcedResponseResults(\n forced_resp=forced_resp,\n speed_range=speed_range,\n magnitude=abs(forced_resp),\n phase=np.angle(forced_resp),\n )\n\n return forced_resp\n\n def _unbalance_force(self, node, magnitude, phase, omega):\n \"\"\"\n Function to calculate unbalance force\n\n Parameters\n ----------\n node : int\n Node where the unbalance is applied.\n magnitude : float\n Unbalance magnitude (kg.m)\n phase : float\n Unbalance phase (rad)\n omega : list, float\n Array with the desired range of frequencies\n\n Returns\n -------\n F0 : list\n Unbalance force in each degree of freedom for each value in omega\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> rotor._unbalance_force(3, 10.0, 0.0, speed)[12] # doctest: +ELLIPSIS\n array([0.000e+00+0.j, 1.000e+03+0.j, 4.000e+03+0.j, ...\n \"\"\"\n\n F0 = np.zeros((self.ndof, len(omega)), dtype=np.complex128)\n me = magnitude\n delta = phase\n b0 = np.array(\n [\n me * np.exp(1j * delta),\n -1j * me * np.exp(1j * delta),\n 0, # 1j*(Id - Ip)*beta*np.exp(1j*gamma),\n 0,\n ]\n ) # (Id - Ip)*beta*np.exp(1j*gamma)])\n\n n0 = 4 * node\n n1 = n0 + 4\n for i, w in enumerate(omega):\n F0[n0:n1, i] += w ** 2 * b0\n\n return F0\n\n def unbalance_response(self, node, magnitude, phase, frequency_range=None):\n \"\"\"Unbalanced response for a mdof system.\n\n This method returns the unbalanced response for a mdof system\n given magnitide and phase of the unbalance, the node where it's\n applied and a frequency range.\n\n Parameters\n ----------\n node : list, int\n Node where the unbalance is applied.\n magnitude : list, float\n Unbalance magnitude (kg.m)\n phase : list, float\n Unbalance phase (rad)\n frequency_range : list, float\n Array with the desired range of frequencies\n\n Returns\n -------\n force_resp : array\n Array with the force response for each node for each frequency\n speed_range : array\n Array with the frequencies\n magdb : array\n Magnitude (dB) of the frequency response for each pair input/output.\n The order of the array is: [output, input, magnitude]\n phase : array\n Phase of the frequency response for each pair input/output.\n The order of the array is: [output, input, phase]\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = np.linspace(0, 1000, 101)\n >>> response = rotor.unbalance_response(node=3, magnitude=10.0, phase=0.0, 
frequency_range=speed)\n >>> response.magnitude # doctest: +ELLIPSIS\n array([[0.00000000e+00, 5.06073311e-04, 2.10044826e-03, ...\n \"\"\"\n force = np.zeros((self.ndof, len(frequency_range)), dtype=np.complex)\n\n try:\n for n, m, p in zip(node, magnitude, phase):\n force += self._unbalance_force(n, m, p, frequency_range)\n except TypeError:\n force = self._unbalance_force(node, magnitude, phase, frequency_range)\n\n forced_response = self.run_forced_response(force, frequency_range)\n\n return forced_response\n\n def time_response(self, speed, F, t, ic=None):\n \"\"\"Time response for a rotor.\n\n This method returns the time response for a rotor\n given a force, time and initial conditions.\n\n Parameters\n ----------\n F : array\n Force array (needs to have the same length as time array).\n t : array\n Time array. (must have the same length than lti.B matrix)\n ic : array, optional\n The initial conditions on the state vector (zero by default).\n\n Returns\n -------\n t : array\n Time values for the output.\n yout : array\n System response.\n xout : array\n Time evolution of the state vector.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = 0\n >>> size = 28\n >>> t = np.linspace(0, 5, size)\n >>> F = np.ones((size, rotor.ndof))\n >>> rotor.time_response(speed, F, t) # doctest: +ELLIPSIS\n (array([0. , 0.18518519, 0.37037037, ...\n \"\"\"\n modal = self.run_modal(speed=speed)\n return signal.lsim(modal.lti, F, t, X0=ic)\n\n def _plot_rotor_matplotlib(self, nodes=1, check_sld=False, ax=None):\n \"\"\"Plots a rotor object.\n\n This function will take a rotor object and plot its shaft,\n disks and bearing elements\n\n Parameters\n ----------\n nodes : int, optional\n Increment that will be used to plot nodes label.\n check_sld : bool\n If True, checks the slenderness ratio for each element\n ax : matplotlib plotting axes, optional\n Axes in which the plot will be drawn.\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>> rotor._plot_rotor_matplotlib() # doctest: +ELLIPSIS\n <matplotlib.axes...\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n # plot shaft centerline\n shaft_end = max(self.nodes_pos)\n ax.plot([-0.2 * shaft_end, 1.2 * shaft_end], [0, 0], \"k-.\")\n\n try:\n max_diameter = max([disk.o_d for disk in self.disk_elements])\n except (ValueError, AttributeError):\n max_diameter = max([shaft.odl for shaft in self.shaft_elements])\n\n ax.set_ylim(-1.2 * max_diameter, 1.2 * max_diameter)\n ax.axis(\"equal\")\n ax.set_xlabel(\"Axial location (m)\")\n ax.set_ylabel(\"Shaft radius (m)\")\n\n # plot nodes\n for node, position in enumerate(self.nodes_pos[::nodes]):\n ax.plot(\n position,\n 0,\n zorder=2,\n ls=\"\",\n marker=\"D\",\n color=\"#6caed6\",\n markersize=10,\n alpha=0.6,\n )\n ax.text(\n position,\n 0,\n f\"{node*nodes}\",\n size=\"smaller\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n )\n\n # plot shaft elements\n for sh_elm in self.shaft_elements:\n position = self.nodes_pos[sh_elm.n]\n sh_elm.patch(position, check_sld, ax)\n\n mean_od = np.mean(self.nodes_o_d)\n # plot disk elements\n for disk in self.disk_elements:\n position = (self.nodes_pos[disk.n], self.nodes_o_d[disk.n] / 2, mean_od)\n disk.patch(position, ax)\n\n # plot bearings\n for bearing in self.bearing_elements:\n z_pos = self.df[self.df.tag == bearing.tag][\"nodes_pos_l\"].values[0]\n y_pos = self.df[self.df.tag == bearing.tag][\"y_pos\"].values[0]\n 
y_pos_sup = self.df[self.df.tag == bearing.tag][\"y_pos_sup\"].values[0]\n position = (z_pos, y_pos, y_pos_sup)\n bearing.patch(position, ax)\n\n # plot point mass\n for p_mass in self.point_mass_elements:\n z_pos = self.df[self.df.tag == p_mass.tag][\"nodes_pos_l\"].values[0]\n y_pos = self.df[self.df.tag == p_mass.tag][\"y_pos\"].values[0]\n position = (z_pos, y_pos)\n p_mass.patch(position, ax)\n\n return ax\n\n def _plot_rotor_bokeh(self, nodes=1, check_sld=False, bk_ax=None):\n \"\"\"Plots a rotor object.\n\n This function will take a rotor object and plot its shaft,\n disks and bearing elements\n\n Parameters\n ----------\n nodes : int, optional\n Increment that will be used to plot nodes label.\n check_sld : bool\n If True, checks the slenderness ratio for each element\n bk_ax : bokeh plotting axes, optional\n Axes in which the plot will be drawn.\n\n Returns\n -------\n bk_ax : bokeh plotting axes\n Returns the axes object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>> figure = rotor._plot_rotor_bokeh()\n \"\"\"\n # plot shaft centerline\n shaft_end = max(self.nodes_pos)\n\n # bokeh plot - create a new plot\n bk_ax = figure(\n tools=\"pan, wheel_zoom, reset, save\",\n width=800,\n height=600,\n x_range=[-0.1 * shaft_end, 1.1 * shaft_end],\n y_range=[-0.3 * shaft_end, 0.3 * shaft_end],\n title=\"Rotor model\",\n x_axis_label=\"Axial location (m)\",\n y_axis_label=\"Shaft radius (m)\",\n match_aspect=True,\n )\n bk_ax.xaxis.axis_label_text_font_size = \"14pt\"\n bk_ax.yaxis.axis_label_text_font_size = \"14pt\"\n\n # bokeh plot - plot shaft centerline\n bk_ax.line(\n [-0.2 * shaft_end, 1.2 * shaft_end],\n [0, 0],\n line_width=3,\n line_dash=\"dotdash\",\n line_color=bokeh_colors[0],\n )\n\n # plot nodes\n text = []\n x_pos = []\n for node, position in enumerate(self.nodes_pos[::nodes]):\n # bokeh plot\n text.append(str(node * nodes))\n x_pos.append(position)\n\n # bokeh plot - plot nodes\n y_pos = np.linspace(0, 0, len(self.nodes_pos[::nodes]))\n\n source = ColumnDataSource(dict(x=x_pos, y=y_pos, text=text))\n\n bk_ax.circle(\n x=x_pos, y=y_pos, size=30, fill_alpha=0.8, fill_color=bokeh_colors[6]\n )\n\n glyph = Text(\n x=\"x\",\n y=\"y\",\n text=\"text\",\n text_font_style=\"bold\",\n text_baseline=\"middle\",\n text_align=\"center\",\n text_alpha=1.0,\n text_color=bokeh_colors[0],\n )\n bk_ax.add_glyph(source, glyph)\n\n # plot shaft elements\n for sh_elm in self.shaft_elements:\n position = self.nodes_pos[sh_elm.n]\n hover = sh_elm.bokeh_patch(position, check_sld, bk_ax)\n\n bk_ax.add_tools(hover)\n\n mean_od = np.mean(self.nodes_o_d)\n # plot disk elements\n for disk in self.disk_elements:\n position = (self.nodes_pos[disk.n], self.nodes_o_d[disk.n] / 2, mean_od)\n hover = disk.bokeh_patch(position, bk_ax)\n\n bk_ax.add_tools(hover)\n\n # plot bearings\n for bearing in self.bearing_elements:\n z_pos = self.df[self.df.tag == bearing.tag][\"nodes_pos_l\"].values[0]\n y_pos = self.df[self.df.tag == bearing.tag][\"y_pos\"].values[0]\n y_pos_sup = self.df[self.df.tag == bearing.tag][\"y_pos_sup\"].values[0]\n position = (z_pos, y_pos, y_pos_sup)\n bearing.bokeh_patch(position, bk_ax)\n\n # plot point mass\n for p_mass in self.point_mass_elements:\n z_pos = self.df[self.df.tag == p_mass.tag][\"nodes_pos_l\"].values[0]\n y_pos = self.df[self.df.tag == p_mass.tag][\"y_pos\"].values[0]\n position = (z_pos, y_pos)\n hover = p_mass.bokeh_patch(position, bk_ax)\n\n bk_ax.add_tools(hover)\n\n return bk_ax\n\n def plot_rotor(self, nodes=1, 
*args, plot_type=\"bokeh\", **kwargs):\n \"\"\"Plots a rotor object.\n\n This function will take a rotor object and plot its shaft,\n disks and bearing elements\n\n Parameters\n ----------\n nodes : int, optional\n Increment that will be used to plot nodes label.\n plot_type : str\n Matplotlib or bokeh.\n Default is matplotlib.\n ax : matplotlib axes, optional\n Axes in which the plot will be drawn.\n bk_ax : bokeh plotting axes, optional\n Axes in which the plot will be drawn.\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes object with the plot.\n bk_ax : bokeh plotting axes\n Returns the axes object with the plot.\n\n Examples:\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>> rotor.plot_rotor() # doctest: +ELLIPSIS\n Figure...\n \"\"\"\n if plot_type == \"matplotlib\":\n return self._plot_rotor_matplotlib(\n nodes=nodes, check_sld=False, *args, **kwargs\n )\n elif plot_type == \"bokeh\":\n return self._plot_rotor_bokeh(nodes=nodes, check_sld=False, *args, **kwargs)\n else:\n raise ValueError(f\"{plot_type} is not a valid plot type.\")\n\n def check_slenderness_ratio(self, nodes=1, *args, plot_type=\"matplotlib\", **kwargs):\n \"\"\"Plots a rotor object and check the slenderness ratio\n\n Parameters\n ----------\n nodes : int, optional\n Increment that will be used to plot nodes label.\n plot_type : str\n Matplotlib or bokeh.\n Default is matplotlib.\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes object with the plot.\n bk_ax : bokeh plotting axes\n Returns the axes object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>> rotor.check_slenderness_ratio() # doctest: +ELLIPSIS\n <matplotlib.axes...\n \"\"\"\n\n # check slenderness ratio of beam elements\n SR = np.array([])\n for shaft in self.shaft_elements:\n if shaft.slenderness_ratio < 1.6:\n SR = np.append(SR, shaft.n)\n if len(SR) != 0:\n warnings.warn(\n \"The beam elements \"\n + str(SR)\n + \" have slenderness ratio (G*A*L^2 / EI) of less than 1.6.\"\n + \" Results may not converge correctly\"\n )\n\n if plot_type == \"matplotlib\":\n return self._plot_rotor_matplotlib(\n nodes=nodes, check_sld=True, *args, **kwargs\n )\n elif plot_type == \"bokeh\":\n return self._plot_rotor_bokeh(nodes=nodes, check_sld=True, *args, **kwargs)\n else:\n raise ValueError(f\"{plot_type} is not a valid plot type.\")\n\n def run_campbell(self, speed_range, frequencies=6, frequency_type=\"wd\"):\n \"\"\"Calculates the Campbell diagram.\n\n This function will calculate the damped natural frequencies\n for a speed range.\n\n Parameters\n ----------\n speed_range : array\n Array with the speed range in rad/s.\n frequencies : int, optional\n Number of frequencies that will be calculated.\n Default is 6.\n\n Returns\n -------\n results : array\n Array with the damped natural frequencies, log dec and precessions\n corresponding to each speed of the speed_rad array.\n It will be returned if plot=False.\n\n Examples\n --------\n >>> rotor1 = rotor_example()\n >>> speed = np.linspace(0, 400, 101)\n >>> camp = rotor1.run_campbell(speed)\n >>> camp.plot() # doctest: +ELLIPSIS\n Figure...\n \"\"\"\n # store in results [speeds(x axis), frequencies[0] or logdec[1] or\n # whirl[2](y axis), 3]\n results = np.zeros([len(speed_range), frequencies, 5])\n\n for i, w in enumerate(speed_range):\n modal = self.run_modal(speed=w)\n\n if frequency_type == \"wd\":\n results[i, :, 0] = modal.wd[:frequencies]\n results[i, :, 1] = modal.log_dec[:frequencies]\n results[i, :, 2] = 
modal.whirl_values()[:frequencies]\n else:\n idx = modal.wn.argsort()\n results[i, :, 0] = modal.wn[idx][:frequencies]\n results[i, :, 1] = modal.log_dec[idx][:frequencies]\n results[i, :, 2] = modal.whirl_values()[idx][:frequencies]\n\n results[i, :, 3] = w\n results[i, :, 4] = modal.wn[:frequencies]\n\n results = CampbellResults(\n speed_range=speed_range,\n wd=results[..., 0],\n log_dec=results[..., 1],\n whirl_values=results[..., 2],\n )\n\n return results\n\n def plot_ucs(self, stiffness_range=None, num=20, ax=None, output_html=False):\n \"\"\"Plot undamped critical speed map.\n\n This method will plot the undamped critical speed map for a given range\n of stiffness values. If the range is not provided, the bearing\n stiffness at rated speed will be used to create a range.\n\n Parameters\n ----------\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range.\n num : int\n Number of steps in the range.\n Default is 20.\n ax : matplotlib axes, optional\n Axes in which the plot will be drawn.\n output_html : Boolean, optional\n outputs a html file.\n Default is False\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes object with the plot.\n bk_ax : bokeh plot axes\n Returns the axes object with the plot.\n\n Example\n -------\n >>> i_d = 0\n >>> o_d = 0.05\n >>> n = 6\n >>> L = [0.25 for _ in range(n)]\n >>> shaft_elem = [\n ... ShaftElement(\n ... l, i_d, o_d, material=steel, shear_effects=True,\n ... rotary_inertia=True, gyroscopic=True\n ... )\n ... for l in L\n ... ]\n >>> disk0 = DiskElement.from_geometry(\n ... n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n ... )\n >>> disk1 = DiskElement.from_geometry(\n ... n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n ... )\n >>> stfx = [1e6, 2e7, 3e8]\n >>> stfy = [0.8e6, 1.6e7, 2.4e8]\n >>> bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0, frequency=[0,1000, 2000])\n >>> bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0, frequency=[0,1000, 2000])\n >>> rotor = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n >>> rotor.plot_ucs() # doctest: +ELLIPSIS\n <matplotlib.axes._subplots.AxesSubplot ...\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if stiffness_range is None:\n if self.rated_w is not None:\n bearing = self.bearing_elements[0]\n k = bearing.kxx.interpolated(self.rated_w)\n k = int(np.log10(k))\n stiffness_range = (k - 3, k + 3)\n else:\n stiffness_range = (6, 11)\n\n stiffness_log = np.logspace(*stiffness_range, num=num)\n rotor_wn = np.zeros((4, len(stiffness_log)))\n\n bearings_elements = [] # exclude the seals\n for bearing in self.bearing_elements:\n if type(bearing) == BearingElement:\n bearings_elements.append(bearing)\n\n for i, k in enumerate(stiffness_log):\n bearings = [BearingElement(b.n, kxx=k, cxx=0) for b in bearings_elements]\n rotor = self.__class__(\n self.shaft_elements, self.disk_elements, bearings, n_eigen=16\n )\n modal = rotor.run_modal(speed=0)\n rotor_wn[:, i] = modal.wn[:8:2]\n\n ax.set_prop_cycle(cycler(\"color\", seaborn_colors))\n ax.loglog(stiffness_log, rotor_wn.T)\n ax.set_xlabel(\"Bearing Stiffness (N/m)\")\n ax.set_ylabel(\"Critical Speed (rad/s)\")\n\n bearing0 = bearings_elements[0]\n\n ax.plot(\n bearing0.kxx.interpolated(bearing0.frequency),\n bearing0.frequency,\n marker=\"o\",\n color=\"k\",\n alpha=0.25,\n markersize=5,\n lw=0,\n label=\"kxx\",\n )\n ax.plot(\n bearing0.kyy.interpolated(bearing0.frequency),\n bearing0.frequency,\n marker=\"s\",\n color=\"k\",\n alpha=0.25,\n markersize=5,\n lw=0,\n label=\"kyy\",\n )\n 
ax.legend()\n\n # bokeh plot - output to static HTML file\n if output_html:\n output_file(\"Plot_UCS.html\")\n\n # bokeh plot - create a new plot\n bk_ax = figure(\n tools=\"pan, box_zoom, wheel_zoom, reset, save\",\n width=1200,\n height=900,\n title=\"Undamped critical speed map\",\n x_axis_label=\"Bearing Stiffness (N/m)\",\n y_axis_label=\"Critical Speed (rad/s)\",\n x_axis_type=\"log\",\n y_axis_type=\"log\",\n )\n bk_ax.xaxis.axis_label_text_font_size = \"14pt\"\n bk_ax.yaxis.axis_label_text_font_size = \"14pt\"\n\n # bokeh plot - plot shaft centerline\n bk_ax.circle(\n bearing0.kxx.interpolated(bearing0.frequency),\n bearing0.frequency,\n size=5,\n fill_alpha=0.5,\n fill_color=bokeh_colors[0],\n legend_label=\"Kxx\",\n )\n bk_ax.square(\n bearing0.kyy.interpolated(bearing0.frequency),\n bearing0.frequency,\n size=5,\n fill_alpha=0.5,\n fill_color=bokeh_colors[0],\n legend_label=\"Kyy\",\n )\n for j in range(rotor_wn.T.shape[1]):\n bk_ax.line(\n stiffness_log,\n np.transpose(rotor_wn.T)[j],\n line_width=3,\n line_color=bokeh_colors[-j + 1],\n )\n\n return ax\n\n def plot_level1(\n self, n=None, stiffness_range=None, num=5, ax=None, output_html=False, **kwargs\n ):\n \"\"\"Plot level 1 stability analysis.\n\n This method will plot the stability 1 analysis for a\n given stiffness range.\n\n Parameters\n ----------\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range.\n num : int\n Number of steps in the range.\n Default is 5.\n ax : matplotlib axes, optional\n Axes in which the plot will be drawn.\n output_html : Boolean, optional\n outputs a html file.\n Default is False\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes object with the plot.\n bk_ax : bokeh plot axes\n Returns the axes object with the plot.\n Example\n -------\n >>> i_d = 0\n >>> o_d = 0.05\n >>> n = 6\n >>> L = [0.25 for _ in range(n)]\n >>> shaft_elem = [\n ... ShaftElement(\n ... l, i_d, o_d, material=steel, shear_effects=True,\n ... rotary_inertia=True, gyroscopic=True\n ... )\n ... for l in L\n ... ]\n >>> disk0 = DiskElement.from_geometry(\n ... n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n ... )\n >>> disk1 = DiskElement.from_geometry(\n ... n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n ... 
)\n >>> stfx = 1e6\n >>> stfy = 0.8e6\n >>> bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n >>> bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0)\n >>> rotor = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1], rated_w=0)\n >>> rotor.plot_level1(n=0, stiffness_range=(1e6, 1e11)) # doctest: +ELLIPSIS\n (<matplotlib.axes._subplots.AxesSubplot ...\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n stiffness = np.linspace(*stiffness_range, num)\n\n log_dec = np.zeros(len(stiffness))\n\n # set rotor speed to mcs\n speed = self.rated_w\n modal = self.run_modal(speed=speed)\n\n for i, Q in enumerate(stiffness):\n bearings = [copy(b) for b in self.bearing_elements]\n cross_coupling = BearingElement(n=n, kxx=0, cxx=0, kxy=Q, kyx=-Q)\n bearings.append(cross_coupling)\n\n rotor = self.__class__(self.shaft_elements, self.disk_elements, bearings)\n\n modal = rotor.run_modal(speed=speed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec[i] = modal.log_dec[non_backward][0]\n\n ax.plot(stiffness, log_dec, \"--\", **kwargs)\n ax.set_xlabel(\"Applied Cross Coupled Stiffness, Q (N/m)\")\n ax.set_ylabel(\"Log Dec\")\n\n # bokeh plot - output to static HTML file\n if output_html:\n output_file(\"Plot_level1.html\")\n\n # bokeh plot - create a new plot\n bk_ax = figure(\n tools=\"pan, box_zoom, wheel_zoom, reset, save\",\n width=1200,\n height=900,\n title=\"Level 1 stability analysis\",\n x_axis_label=\"Applied Cross Coupled Stiffness, Q (N/m)\",\n y_axis_label=\"Log Dec\",\n )\n bk_ax.xaxis.axis_label_text_font_size = \"14pt\"\n bk_ax.yaxis.axis_label_text_font_size = \"14pt\"\n\n # bokeh plot - plot shaft centerline\n bk_ax.line(stiffness, log_dec, line_width=3, line_color=bokeh_colors[0])\n\n return ax, bk_ax\n\n def run_time_response(self, speed, F, t, dof):\n \"\"\"Calculates the time response.\n\n This function will take a rotor object and plot its time response\n given a force and a time.\n\n Parameters\n ----------\n F : array\n Force array (needs to have the same number of rows as time array).\n Each column corresponds to a dof and each row to a time.\n t : array\n Time array.\n dof : int\n Degree of freedom that will be observed.\n\n Returns\n -------\n results : array\n Array containing the time array, the system response, and the\n time evolution of the state vector.\n It will be returned if plot=False.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = 0\n >>> size = 28\n >>> t = np.linspace(0, 5, size)\n >>> F = np.ones((size, rotor.ndof))\n >>> dof = 13\n >>> response = rotor.run_time_response(speed, F, t, dof)\n >>> response.yout[:, dof] # doctest: +ELLIPSIS\n array([ 0.00000000e+00, 1.06327334e-05, 1.54684988e-05, ...\n \"\"\"\n t_, yout, xout = self.time_response(speed, F, t)\n\n results = TimeResponseResults(t, yout, xout, dof)\n\n return results\n\n def run_orbit_response(self, speed, F, t):\n \"\"\"Calculates the orbit for a given node.\n\n This function will take a rotor object and plot the orbit for a single\n (2D graph) or all nodes (3D graph).\n\n Parameters\n ----------\n speed: float\n Rotor speed\n F: array\n Force array (needs to have the same number of rows as time array).\n Each column corresponds to a dof and each row to a time.\n t: array\n Time array.\n\n Returns\n -------\n results : array\n Array containing the time array, the system response, and the\n time evolution of the state vector.\n It will be returned if plot=False.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> speed = 500.0\n >>> size = 1000\n >>> 
node = 3\n >>> t = np.linspace(0, 10, size)\n >>> F = np.zeros((size, rotor.ndof))\n >>> F[:, 4 * node] = 10 * np.cos(2 * t)\n >>> F[:, 4 * node + 1] = 10 * np.sin(2 * t)\n >>> response = rotor.run_orbit_response(speed, F, t)\n >>> response.yout[:, 4 * node] # doctest: +ELLIPSIS\n array([ 0.00000000e+00, 6.94968863e-06, 2.13014440e-05, ...\n \"\"\"\n t_, yout, xout = self.time_response(speed, F, t)\n\n results = OrbitResponseResults(t, yout, xout, self.nodes, self.nodes_pos)\n\n return results\n\n def save_mat(self, file_path, speed, frequency=None):\n \"\"\"Save matrices and rotor model to a .mat file.\n\n Parameters\n ----------\n file_path : str\n\n speed: float\n Rotor speed.\n frequency: float, optional\n Excitation frequency.\n Default is rotor speed.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> rotor.save_mat('new_matrices.mat', speed=0)\n \"\"\"\n if frequency is None:\n frequency = speed\n\n dic = {\n \"M\": self.M(),\n \"K\": self.K(frequency),\n \"C\": self.C(frequency),\n \"G\": self.G(),\n \"nodes\": self.nodes_pos,\n }\n\n sio.savemat(\"%s/%s.mat\" % (os.getcwd(), file_path), dic)\n\n def save(self, rotor_name=\"rotor\", file_path=Path(\".\")):\n \"\"\"Save rotor to toml file.\n\n Parameters\n ----------\n file_path : str\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> rotor.save('new_rotor')\n >>> Rotor.remove('new_rotor')\n \"\"\"\n path_rotor = Path(file_path)\n\n if os.path.isdir(path_rotor / rotor_name):\n if int(\n input(\n \"There is a rotor with this file_path, do you want to overwrite it? (1 for yes and 0 for no)\"\n )\n ):\n shutil.rmtree(path_rotor / rotor_name)\n else:\n return \"The rotor was not saved.\"\n\n os.mkdir(path_rotor / rotor_name)\n rotor_folder = path_rotor / rotor_name\n os.mkdir(rotor_folder / \"results\")\n os.mkdir(rotor_folder / \"elements\")\n\n with open(rotor_folder / \"properties.toml\", \"w\") as f:\n toml.dump({\"parameters\": self.parameters}, f)\n\n elements_folder = rotor_folder / \"elements\"\n\n for element in self.elements:\n element.save(elements_folder)\n\n @staticmethod\n def load(file_path):\n \"\"\"Load rotor from toml file.\n\n Parameters\n ----------\n file_path : str\n\n Returns\n -------\n rotor : ross.rotor.Rotor\n\n Example\n -------\n >>> rotor1 = rotor_example()\n >>> rotor1.save(Path('.')/'new_rotor1')\n >>> rotor2 = Rotor.load(Path('.')/'new_rotor1')\n >>> rotor1 == rotor2\n True\n >>> Rotor.remove('new_rotor1')\n \"\"\"\n rotor_path = Path(file_path)\n\n if os.path.isdir(rotor_path / \"elements\"):\n elements_path = rotor_path / \"elements\"\n else:\n raise FileNotFoundError(\"Elements folder not found.\")\n\n with open(rotor_path / \"properties.toml\", \"r\") as f:\n parameters = toml.load(f)[\"parameters\"]\n\n global_elements = {}\n for el in os.listdir(elements_path):\n elements = []\n if \".toml\" in el:\n with open(Path(elements_path) / el, \"r\") as f:\n el_dict = toml.load(f)\n element_class = list(el_dict.keys())[0]\n for el_number in el_dict[element_class]:\n element = (\n element_class + f\"(**{el_dict[element_class][el_number]})\"\n )\n elements.append(eval(element))\n global_elements[convert(element_class + \"s\")] = elements\n\n return Rotor(**global_elements, **parameters)\n\n @staticmethod\n def remove(file_path):\n \"\"\"\n Remove a previously saved rotor in rotors folder.\n\n Parameters\n ----------\n file_path : str\n\n Example\n -------\n >>> rotor = rotor_example()\n >>> rotor.save('new_rotor2')\n >>> Rotor.remove('new_rotor2')\n \"\"\"\n try:\n Rotor.load(file_path)\n 
shutil.rmtree(Path(file_path))\n except:\n return \"This is not a valid rotor.\"\n\n def run_static(self):\n \"\"\"Rotor static analysis.\n Static analysis calculates free-body diagram, deformed shaft, shearing\n force diagram and bending moment diagram.\n\n Parameters\n ----------\n\n Attributes\n ----------\n shaft_weight: float\n Shaft total weight\n disk_weigth_force: list\n Weight forces of each disk\n bearing_reaction_force: list\n The static reaction forces on each bearing\n disp_y: array\n The shaft static displacement vector,\n Vx: array\n Shearing force vector\n Bm: array\n Bending moment vector\n\n Returns\n -------\n results: object\n An instance of StaticResult class, which is used to create plots.\n\n Example\n -------\n >>> rotor = rotor_example()\n >>> static = rotor.run_static()\n >>> rotor.bearing_forces_nodal\n {'node_0': 432.4, 'node_6': 432.4}\n >>> rotor.bearing_forces_tag\n {'Bearing 0': 432.4, 'Bearing 1': 432.4}\n \"\"\"\n if not len(self.df_bearings):\n raise ValueError(\"Rotor has no bearings\")\n\n aux_brg = []\n for elm in self.bearing_elements:\n if elm.n not in self.nodes:\n pass\n elif elm.n_link in self.nodes:\n aux_brg.append(\n BearingElement(n=elm.n, n_link=elm.n_link, kxx=1e14, cxx=0)\n )\n else:\n aux_brg.append(BearingElement(n=elm.n, kxx=1e14, cxx=0))\n\n if isinstance(self, CoAxialRotor):\n aux_rotor = CoAxialRotor(self.shafts, self.disk_elements, aux_brg)\n else:\n aux_rotor = Rotor(self.shaft_elements, self.disk_elements, aux_brg)\n aux_K = aux_rotor.K(0)\n aux_M = aux_rotor.M()\n\n for elm in aux_rotor.bearing_elements:\n if isinstance(elm, SealElement):\n dofs = elm.dof_global_index\n try:\n aux_K[np.ix_(dofs, dofs)] -= elm.K(0)\n except TypeError:\n aux_K[np.ix_(dofs, dofs)] -= elm.K()\n\n df_num = aux_rotor.df[\"shaft_number\"].values\n sh_num = [int(item) for item, count in Counter(df_num).items() if count > 1]\n\n # gravity aceleration vector\n g = 9.8065\n grav = np.zeros(len(aux_K))\n\n # place gravity effect on shaft and disks nodes\n for node_y in range(int(len(aux_K) / 4)):\n grav[4 * node_y + 1] = -g\n\n # calculates x, for [K]*(x) = [M]*(g)\n disp = (la.solve(aux_K, aux_M @ grav)).flatten()\n\n # calculates displacement values in gravity's direction\n # dof = degree of freedom\n disp_y = np.array([])\n for node_dof in range(int(len(disp) / 4)):\n disp_y = np.append(disp_y, disp[4 * node_dof + 1])\n\n # Shearing Force\n BRG = [0] * len(self.nodes_pos)\n DSK = [0] * len(self.nodes_pos)\n SCH = [0] * len(self.nodes_pos)\n BrgForce_nodal = {\"node_\" + str(i): 0 for i in self.nodes}\n DskForce_nodal = {\"node_\" + str(i): 0 for i in self.nodes}\n BrgForce_tag = {\"node_\" + str(i): 0 for i in self.nodes}\n DskForce_tag = {\"node_\" + str(i): 0 for i in self.nodes}\n\n # Bearing Forces\n for i, node in enumerate(aux_rotor.df_bearings[\"n\"]):\n if not pd.isna(aux_rotor.df_bearings.loc[i, \"n_link\"]):\n BRG[node] = (\n BRG[node]\n + disp_y[node]\n * self.df_bearings.loc[\n self.df_bearings.tag == aux_rotor.df_bearings.tag, \"kyy\"\n ][0].coefficient[0]\n )\n BrgForce_nodal[\"node_\" + str(node)] = np.around(\n BrgForce_nodal[\"node_\" + str(node)]\n + disp_y[node]\n * self.df_bearings.loc[\n self.df_bearings.tag == aux_rotor.df_bearings.tag, \"kyy\"\n ][0].coefficient[0],\n decimals=1,\n )\n BrgForce_tag[aux_rotor.df_bearings.loc[i, \"tag\"]] = BrgForce_nodal[\n \"node_\" + str(node)\n ]\n\n node = int(aux_rotor.df_bearings.loc[i, \"n_link\"])\n BRG[node] = (\n BRG[node]\n - disp_y[node]\n * self.df_bearings.loc[self.df_bearings.n_link == 
node, \"kyy\"]\n .values[0]\n .coefficient[0]\n )\n BrgForce_nodal[\"node_\" + str(node)] = np.around(\n BrgForce_nodal[\"node_\" + str(node)]\n - disp_y[node]\n * self.df_bearings.loc[self.df_bearings.n_link == node, \"kyy\"]\n .values[0]\n .coefficient[0],\n decimals=1,\n )\n BrgForce_tag[aux_rotor.df_bearings.loc[i, \"tag\"]] = BrgForce_nodal[\n \"node_\" + str(node)\n ]\n\n else:\n BRG[node] = (\n BRG[node]\n - disp_y[node] * aux_rotor.df_bearings.loc[i, \"kyy\"].coefficient[0]\n )\n BrgForce_nodal[\"node_\" + str(node)] = np.around(\n BrgForce_nodal[\"node_\" + str(node)]\n - disp_y[node] * aux_rotor.df_bearings.loc[i, \"kyy\"].coefficient[0],\n decimals=1,\n )\n BrgForce_tag[aux_rotor.df_bearings.loc[i, \"tag\"]] = BrgForce_nodal[\n \"node_\" + str(node)\n ]\n\n # counting nodes with more than 1 bearing attached to\n node_b = list(aux_rotor.df_bearings[\"n\"])\n node_b.extend(list(aux_rotor.df_bearings[\"n_link\"]))\n count = len(node_b) - len(Counter(node_b))\n\n # Disk Forces\n if len(self.df_disks):\n for i, node in enumerate(self.df_disks[\"n\"]):\n DSK[node] = self.df_disks.loc[i, \"m\"] * -g\n DskForce_nodal[\"node_\" + str(node)] = np.around(\n self.df_disks.loc[i, \"m\"] * -g, decimals=1\n )\n DskForce_tag[aux_rotor.df_disks.loc[i, \"tag\"]] = DskForce_nodal[\n \"node_\" + str(node)\n ]\n\n # Shaft Weight Forces\n for i, node in enumerate(self.df_shaft[\"_n\"]):\n SCH[node + 1] = self.df_shaft.loc[i, \"m\"] * -g\n\n # Organizing data for each shaft\n BrgForce = []\n DskForce = []\n SchForce = []\n nodes = []\n nodes_pos = []\n displacement = []\n dsk = []\n brg = []\n for i in sh_num:\n n_min = min(\n aux_rotor.df_shaft.loc[aux_rotor.df_shaft.shaft_number == i, \"n_l\"]\n )\n n_max = max(\n aux_rotor.df_shaft.loc[(aux_rotor.df_shaft.shaft_number == i), \"n_r\"]\n )\n BrgForce.append(BRG[n_min : n_max + 1])\n DskForce.append(DSK[n_min : n_max + 1])\n SchForce.append(SCH[n_min : n_max + 1])\n nodes_pos.append(self.nodes_pos[n_min : n_max + 1])\n displacement.append(disp_y[n_min : n_max + 1])\n nodes.append(list(range(n_min, n_max + 1)))\n\n # get bearings and disks for each shaft\n dsk.append(\n aux_rotor.df_disks.loc[\n aux_rotor.df_disks.shaft_number == i, \"tag\"\n ].values\n )\n brg.append(\n aux_rotor.df_bearings.loc[\n (aux_rotor.df_bearings.shaft_number == i)\n | (\n aux_rotor.df_bearings.n_link.isin(list(range(n_min, n_max + 1)))\n ),\n \"tag\",\n ].values\n )\n\n Mx = []\n Vx = []\n Bm = []\n Vx_axis = []\n for j in sh_num:\n # Shearing Force vector\n aux_Vx = [0] * (len(nodes_pos[j]))\n aux_Vx_axis = [0] * (len(nodes_pos[j]))\n\n for i in range(int(len(nodes_pos[j]))):\n aux_Vx_axis[i] = nodes_pos[j][i]\n aux_Vx[i] = (\n aux_Vx[i - 1] + BrgForce[j][i] + DskForce[j][i] + SchForce[j][i]\n )\n\n for i in range(len(aux_Vx_axis) + len(dsk[j]) + len(brg[j]) - count):\n if DskForce[j][i] != 0:\n aux_Vx.insert(i, aux_Vx[i - 1] + SchForce[j][i])\n DskForce[j].insert(i + 1, 0)\n SchForce[j].insert(i + 1, 0)\n BrgForce[j].insert(i + 1, 0)\n aux_Vx_axis.insert(i, aux_Vx_axis[i])\n\n if BrgForce[j][i] != 0:\n aux_Vx.insert(i, aux_Vx[i - 1] + SchForce[j][i])\n BrgForce[j].insert(i + 1, 0)\n DskForce[j].insert(i + 1, 0)\n SchForce[j].insert(i + 1, 0)\n aux_Vx_axis.insert(i, aux_Vx_axis[i])\n\n aux_Vx = [x * -1 for x in aux_Vx]\n Vx.append(np.array(aux_Vx))\n Vx_axis.append(np.array(aux_Vx_axis))\n\n # Bending Moment vector\n aux_Mx = []\n for i in range(len(aux_Vx) - 1):\n if aux_Vx_axis[i] == aux_Vx_axis[i + 1]:\n pass\n else:\n aux_Mx.append(\n (\n (aux_Vx_axis[i + 1] * aux_Vx[i + 
1])\n + (aux_Vx_axis[i + 1] * aux_Vx[i])\n - (aux_Vx_axis[i] * aux_Vx[i + 1])\n - (aux_Vx_axis[i] * aux_Vx[i])\n )\n / 2\n )\n Mx.append(aux_Mx)\n\n aux_Bm = np.zeros(1)\n for i in range(len(Mx[j])):\n aux_Bm = np.append(aux_Bm, aux_Bm[i] + aux_Mx[i])\n Bm.append(aux_Bm)\n\n self.Vx = Vx\n self.Bm = Bm\n self.disp_y = displacement\n\n self.w_shaft = [\n sum(self.df_shaft.loc[self.df_shaft.shaft_number == i, \"m\"]) * g\n for i in sh_num\n ]\n\n DskForce_nodal = {k: v for k, v in DskForce_nodal.items() if v != 0}\n BrgForce_nodal = {k: v for k, v in BrgForce_nodal.items() if v != 0}\n BrgForce_tag = {k: v for k, v in BrgForce_tag.items() if v != 0}\n DskForce_tag = {k: v for k, v in DskForce_tag.items() if v != 0}\n\n self.disk_forces_nodal = DskForce_nodal\n self.bearing_forces_nodal = BrgForce_nodal\n self.bearing_forces_tag = BrgForce_tag\n self.disk_forces_tag = DskForce_tag\n\n results = StaticResults(\n self.disp_y,\n self.Vx,\n self.Bm,\n self.w_shaft,\n self.disk_forces_nodal,\n self.bearing_forces_nodal,\n nodes,\n nodes_pos,\n Vx_axis,\n )\n\n return results\n\n def summary(self):\n \"\"\"Rotor summary.\n\n This creates a summary of the main parameters and attributes from the\n rotor model. The data is presented in a table format.\n\n Parameters\n ----------\n\n Returns\n -------\n results : class instance\n An instance of SumarryResults class to build the summary table\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> table = rotor.summary().plot()\n >>> # to display the plot use the command:\n >>> # show(table)\n \"\"\"\n self.run_static()\n forces = self.bearing_forces_tag\n results = SummaryResults(\n self.df_shaft,\n self.df_disks,\n self.df_bearings,\n self.nodes_pos,\n forces,\n self.CG,\n self.Ip,\n self.tag,\n )\n return results\n\n @classmethod\n def from_section(\n cls,\n leng_data,\n idl_data,\n odl_data,\n idr_data=None,\n odr_data=None,\n material_data=None,\n disk_data=None,\n brg_seal_data=None,\n sparse=True,\n min_w=None,\n max_w=None,\n rated_w=None,\n n_eigen=12,\n nel_r=1,\n tag=None,\n ):\n \"\"\"This class is an alternative to build rotors from separated\n sections. 
Each section has the same number (n) of shaft elements.\n\n Parameters\n ----------\n leng_data : list\n List with the lengths of rotor regions.\n idl_data : list\n List with the inner diameters of rotor regions (Left Station).\n odl_data : list\n List with the outer diameters of rotor regions (Left Station).\n idr_data : list, optional\n List with the inner diameters of rotor regions (Right Station).\n Default is equal to idl_data (cylindrical element).\n odr_data : list, optional\n List with the outer diameters of rotor regions (Right Station).\n Default is equal to odl_data (cylindrical element).\n material_data : ross.material or list of ross.material\n Defines a single material for all sections or each section can be\n defined by a material individually.\n disk_data : dict, optional\n Dict holding disks datas.\n Example : disk_data=DiskElement.from_geometry(n=2,\n material=steel,\n width=0.07,\n i_d=0,\n o_d=0.28\n )\n ***See 'disk_element.py' docstring for more information***\n brg_seal_data : dict, optional\n Dict holding lists of bearings and seals datas.\n Example : brg_seal_data=BearingElement(n=1, kxx=1e6, cxx=0,\n kyy=1e6, cyy=0, kxy=0,\n cxy=0, kyx=0, cyx=0)\n ***See 'bearing_seal_element.py' docstring for more information***\n nel_r : int, optional\n Number or elements per shaft region.\n Default is 1.\n n_eigen : int, optional\n Number of eigenvalues calculated by arpack.\n Default is 12.\n tag : str\n A tag for the rotor\n\n Returns\n -------\n A rotor object\n\n Example\n -------\n >>> from ross.materials import steel\n >>> rotor = Rotor.from_section(leng_data=[0.5,0.5,0.5],\n ... odl_data=[0.05,0.05,0.05],\n ... idl_data=[0,0,0],\n ... material_data=steel,\n ... disk_data=[DiskElement.from_geometry(n=1, material=steel, width=0.07, i_d=0, o_d=0.28),\n ... DiskElement.from_geometry(n=2, material=steel, width=0.07, i_d=0, o_d=0.35)],\n ... brg_seal_data=[BearingElement(n=0, kxx=1e6, cxx=0, kyy=1e6, cyy=0, kxy=0, cxy=0, kyx=0, cyx=0),\n ... BearingElement(n=3, kxx=1e6, cxx=0, kyy=1e6, cyy=0, kxy=0, cxy=0, kyx=0, cyx=0)],\n ... 
nel_r=1)\n >>> modal = rotor.run_modal(speed=0)\n >>> modal.wn.round(4)\n array([ 85.7634, 85.7634, 271.9326, 271.9326, 718.58 , 718.58 ])\n \"\"\"\n\n if len(leng_data) != len(odl_data) or len(leng_data) != len(idl_data):\n raise ValueError(\n \"The lists size do not match (leng_data, odl_data and idl_data).\"\n )\n\n if material_data is None:\n raise AttributeError(\"Please define a material or a list of materials\")\n\n if idr_data is None:\n idr_data = idl_data\n if odr_data is None:\n odr_data = odl_data\n else:\n if len(leng_data) != len(odr_data) or len(leng_data) != len(idr_data):\n raise ValueError(\n \"The lists size do not match (leng_data, odr_data and idr_data).\"\n )\n\n def rotor_regions(nel_r):\n \"\"\"\n A subroutine to discretize each rotor region into n elements\n\n Parameters\n ----------\n nel_r : int\n Number of elements per region\n\n Returns\n -------\n regions : list\n List with elements\n \"\"\"\n regions = []\n shaft_elements = []\n disk_elements = []\n bearing_elements = []\n\n try:\n if len(leng_data) != len(material_data):\n raise IndexError(\n \"material_data size does not match size of other lists\"\n )\n\n # loop through rotor regions\n for i, leng in enumerate(leng_data):\n le = leng / nel_r\n for j in range(nel_r):\n idl = (idr_data[i] - idl_data[i]) * j * le / leng + idl_data[i]\n odl = (odr_data[i] - odl_data[i]) * j * le / leng + odl_data[i]\n idr = (idr_data[i] - idl_data[i]) * (\n j + 1\n ) * le / leng + idl_data[i]\n odr = (odr_data[i] - odl_data[i]) * (\n j + 1\n ) * le / leng + odl_data[i]\n shaft_elements.append(\n ShaftElement(\n le,\n idl,\n odl,\n idr,\n odr,\n material=material_data[i],\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n )\n except TypeError:\n for i, leng in enumerate(leng_data):\n le = leng / nel_r\n for j in range(nel_r):\n idl = (idr_data[i] - idl_data[i]) * j * le / leng + idl_data[i]\n odl = (odr_data[i] - odl_data[i]) * j * le / leng + odl_data[i]\n idr = (idr_data[i] - idl_data[i]) * (\n j + 1\n ) * le / leng + idl_data[i]\n odr = (odr_data[i] - odl_data[i]) * (\n j + 1\n ) * le / leng + odl_data[i]\n shaft_elements.append(\n ShaftElement(\n le,\n idl,\n odl,\n idr,\n odr,\n material=material_data,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n )\n\n regions.extend([shaft_elements])\n\n for DiskEl in disk_data:\n aux_DiskEl = deepcopy(DiskEl)\n aux_DiskEl.n = nel_r * DiskEl.n\n aux_DiskEl.n_l = nel_r * DiskEl.n_l\n aux_DiskEl.n_r = nel_r * DiskEl.n_r\n disk_elements.append(aux_DiskEl)\n\n for Brg_SealEl in brg_seal_data:\n aux_Brg_SealEl = deepcopy(Brg_SealEl)\n aux_Brg_SealEl.n = nel_r * Brg_SealEl.n\n aux_Brg_SealEl.n_l = nel_r * Brg_SealEl.n_l\n aux_Brg_SealEl.n_r = nel_r * Brg_SealEl.n_r\n bearing_elements.append(aux_Brg_SealEl)\n\n regions.append(disk_elements)\n regions.append(bearing_elements)\n\n return regions\n\n regions = rotor_regions(nel_r)\n shaft_elements = regions[0]\n disk_elements = regions[1]\n bearing_elements = regions[2]\n\n return cls(\n shaft_elements,\n disk_elements,\n bearing_elements,\n sparse=sparse,\n n_eigen=n_eigen,\n min_w=min_w,\n max_w=max_w,\n rated_w=rated_w,\n tag=tag,\n )\n\n\nclass CoAxialRotor(Rotor):\n r\"\"\"A rotor object.\n\n This class will create a system of co-axial rotors with the shaft,\n disk, bearing and seal elements provided.\n\n Parameters\n ----------\n shafts : list of lists\n Each list of shaft elements builds a different shaft. 
The number of\n lists sets the number of shafts.\n disk_elements : list\n List with the disk elements\n bearing_elements : list\n List with the bearing elements\n point_mass_elements: list\n List with the point mass elements\n shaft_start_pos : list\n List indicating the initial node position for each shaft.\n Default is zero for each shaft created.\n sparse : bool, optional\n If sparse, eigenvalues will be calculated with arpack.\n Default is True.\n n_eigen : int, optional\n Number of eigenvalues calculated by arpack.\n Default is 12.\n tag : str\n A tag for the rotor\n\n Returns\n -------\n A rotor object.\n\n Attributes\n ----------\n nodes : list\n List of the model's nodes.\n nodes_pos : list\n List with nodal spatial location.\n CG : float\n Center of gravity\n\n Examples\n --------\n >>> import ross as rs\n >>> steel = rs.materials.steel\n >>> i_d = 0\n >>> o_d = 0.05\n >>> n = 10\n >>> L = [0.25 for _ in range(n)]\n >>> axial_shaft = [rs.ShaftElement(l, i_d, o_d, material=steel) for l in L]\n >>> i_d = 0.15\n >>> o_d = 0.20\n >>> n = 6\n >>> L = [0.25 for _ in range(n)]\n >>> coaxial_shaft = [rs.ShaftElement(l, i_d, o_d, material=steel) for l in L]\n >>> shaft = [axial_shaft, coaxial_shaft]\n >>> disk0 = rs.DiskElement.from_geometry(n=1,\n ... material=steel,\n ... width=0.07,\n ... i_d=0.05,\n ... o_d=0.28)\n >>> disk1 = rs.DiskElement.from_geometry(n=9,\n ... material=steel,\n ... width=0.07,\n ... i_d=0.05,\n ... o_d=0.28)\n >>> disk2 = rs.DiskElement.from_geometry(n=13,\n ... material=steel,\n ... width=0.07,\n ... i_d=0.20,\n ... o_d=0.48)\n >>> disk3 = rs.DiskElement.from_geometry(n=15,\n ... material=steel,\n ... width=0.07,\n ... i_d=0.20,\n ... o_d=0.48)\n >>> disks = [disk0, disk1, disk2, disk3]\n >>> stfx = 1e6\n >>> stfy = 0.8e6\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n >>> bearing1 = rs.BearingElement(10, kxx=stfx, kyy=stfy, cxx=0)\n >>> bearing2 = rs.BearingElement(11, kxx=stfx, kyy=stfy, cxx=0)\n >>> bearing3 = rs.BearingElement(8, n_link=17, kxx=stfx, kyy=stfy, cxx=0)\n >>> bearings = [bearing0, bearing1, bearing2, bearing3]\n >>> rotor = rs.CoAxialRotor(shaft, disks, bearings)\n \"\"\"\n\n def __init__(\n self,\n shafts,\n disk_elements=None,\n bearing_elements=None,\n point_mass_elements=None,\n sparse=True,\n n_eigen=12,\n min_w=None,\n max_w=None,\n rated_w=None,\n tag=None,\n ):\n\n self.parameters = {\n \"sparse\": True,\n \"n_eigen\": n_eigen,\n \"min_w\": min_w,\n \"max_w\": max_w,\n \"rated_w\": rated_w,\n }\n if tag is None:\n self.tag = \"Rotor 0\"\n\n ####################################################\n # Config attributes\n ####################################################\n\n self.sparse = sparse\n self.n_eigen = n_eigen\n # operational speeds\n self.min_w = min_w\n self.max_w = max_w\n self.rated_w = rated_w\n\n ####################################################\n\n # set n for each shaft element\n aux_n = 0\n aux_n_tag = 0\n for j, shaft in enumerate(shafts):\n for i, sh in enumerate(shaft):\n if sh.n is None:\n sh.n = i + aux_n\n if sh.tag is None:\n sh.tag = sh.__class__.__name__ + \" \" + str(i + aux_n_tag)\n aux_n = shaft[-1].n_r + 1\n aux_n_tag = aux_n - 1 - j\n\n # flatten and make a copy for shaft elements to avoid altering\n # attributes for elements that might be used in different rotors\n # e.g. 
altering shaft_element.n\n shafts = [copy(sh) for sh in shafts]\n shaft_elements = list(chain(*shafts))\n\n if disk_elements is None:\n disk_elements = []\n if bearing_elements is None:\n bearing_elements = []\n if point_mass_elements is None:\n point_mass_elements = []\n\n for i, disk in enumerate(disk_elements):\n if disk.tag is None:\n disk.tag = \"Disk \" + str(i)\n\n for i, brg in enumerate(bearing_elements):\n if brg.__class__.__name__ == \"BearingElement\" and brg.tag is None:\n brg.tag = \"Bearing \" + str(i)\n if brg.__class__.__name__ == \"SealElement\" and brg.tag is None:\n brg.tag = \"Seal \" + str(i)\n\n for i, p_mass in enumerate(point_mass_elements):\n if p_mass.tag is None:\n p_mass.tag = \"Point Mass \" + str(i)\n\n self.shafts = shafts\n self.shaft_elements = sorted(shaft_elements, key=lambda el: el.n)\n self.bearing_elements = sorted(bearing_elements, key=lambda el: el.n)\n self.disk_elements = disk_elements\n self.point_mass_elements = point_mass_elements\n self.elements = list(\n chain(\n *[\n self.shaft_elements,\n self.disk_elements,\n self.bearing_elements,\n self.point_mass_elements,\n ]\n )\n )\n\n ####################################################\n # Rotor summary\n ####################################################\n columns = [\n \"type\",\n \"n\",\n \"n_link\",\n \"L\",\n \"node_pos\",\n \"node_pos_r\",\n \"idl\",\n \"odl\",\n \"idr\",\n \"odr\",\n \"i_d\",\n \"o_d\",\n \"beam_cg\",\n \"axial_cg_pos\",\n \"y_pos\",\n \"material\",\n \"rho\",\n \"volume\",\n \"m\",\n \"tag\",\n ]\n\n df_shaft = pd.DataFrame([el.summary() for el in self.shaft_elements])\n df_disks = pd.DataFrame([el.summary() for el in self.disk_elements])\n df_bearings = pd.DataFrame(\n [\n el.summary()\n for el in self.bearing_elements\n if not isinstance(el, SealElement)\n ]\n )\n df_seals = pd.DataFrame(\n [\n el.summary()\n for el in self.bearing_elements\n if isinstance(el, SealElement)\n ]\n )\n df_point_mass = pd.DataFrame([el.summary() for el in self.point_mass_elements])\n\n nodes_pos_l = np.zeros(len(df_shaft.n_l))\n nodes_pos_r = np.zeros(len(df_shaft.n_l))\n axial_cg_pos = np.zeros(len(df_shaft.n_l))\n shaft_number = np.zeros(len(df_shaft.n_l))\n\n i = 0\n for j, shaft in enumerate(self.shafts):\n for k, sh in enumerate(shaft):\n shaft_number[k + i] = j\n if k == 0:\n nodes_pos_r[k + i] = df_shaft.loc[k + i, \"L\"]\n axial_cg_pos[k + i] = sh.beam_cg + nodes_pos_l[k + i]\n sh.axial_cg_pos = axial_cg_pos[k + i]\n if (\n k > 0\n and df_shaft.loc[k + i, \"n_l\"] == df_shaft.loc[k + i - 1, \"n_l\"]\n ):\n nodes_pos_l[k + i] = nodes_pos_l[k + i - 1]\n nodes_pos_r[k + i] = nodes_pos_r[k + i - 1]\n else:\n nodes_pos_l[k + i] = nodes_pos_r[k + i - 1]\n nodes_pos_r[k + i] = nodes_pos_l[k + i] + df_shaft.loc[k + i, \"L\"]\n\n if sh.n in df_bearings[\"n_link\"].values:\n idx = df_bearings.loc[df_bearings.n_link == sh.n, \"n\"].values[0]\n nodes_pos_l[i : sh.n] += nodes_pos_l[idx] - nodes_pos_l[k + i]\n nodes_pos_r[i : sh.n] += nodes_pos_r[idx] - nodes_pos_r[k + i]\n axial_cg_pos[i : sh.n] += nodes_pos_r[idx] - nodes_pos_r[k + i]\n elif sh.n_r in df_bearings[\"n_link\"].values:\n idx = df_bearings.loc[df_bearings.n_link == sh.n_r, \"n\"].values[0]\n nodes_pos_l[i : sh.n_r] += nodes_pos_l[idx - 1] - nodes_pos_l[k + i]\n nodes_pos_r[i : sh.n_r] += nodes_pos_r[idx - 1] - nodes_pos_r[k + i]\n axial_cg_pos[i : sh.n_r] += (\n nodes_pos_r[idx - 1] - nodes_pos_r[k + i]\n )\n\n axial_cg_pos[k + i] = sh.beam_cg + nodes_pos_l[k + i]\n sh.axial_cg_pos = axial_cg_pos[k + i]\n i += k + 1\n\n 
df_shaft[\"shaft_number\"] = shaft_number\n df_shaft[\"nodes_pos_l\"] = nodes_pos_l\n df_shaft[\"nodes_pos_r\"] = nodes_pos_r\n df_shaft[\"axial_cg_pos\"] = axial_cg_pos\n\n df = pd.concat(\n [df_shaft, df_disks, df_bearings, df_point_mass, df_seals], sort=True\n )\n df = df.sort_values(by=\"n_l\")\n df = df.reset_index(drop=True)\n\n # check consistence for disks and bearings location\n if len(df_point_mass) > 0:\n max_loc_point_mass = df_point_mass.n.max()\n else:\n max_loc_point_mass = 0\n max_location = max(df_shaft.n_r.max(), max_loc_point_mass)\n if df.n_l.max() > max_location:\n raise ValueError(\"Trying to set disk or bearing outside shaft\")\n\n # nodes axial position and diameter\n nodes_pos = list(df_shaft.groupby(\"n_l\")[\"nodes_pos_l\"].max())\n nodes_i_d = list(df_shaft.groupby(\"n_l\")[\"i_d\"].min())\n nodes_o_d = list(df_shaft.groupby(\"n_l\")[\"o_d\"].max())\n\n for i, shaft in enumerate(self.shafts):\n pos = shaft[-1].n_r\n if i < len(self.shafts) - 1:\n nodes_pos.insert(pos, df_shaft[\"nodes_pos_r\"].iloc[pos - 1])\n nodes_i_d.insert(pos, df_shaft[\"i_d\"].iloc[pos - 1])\n nodes_o_d.insert(pos, df_shaft[\"o_d\"].iloc[pos - 1])\n else:\n nodes_pos.append(df_shaft[\"nodes_pos_r\"].iloc[-1])\n nodes_i_d.append(df_shaft[\"i_d\"].iloc[-1])\n nodes_o_d.append(df_shaft[\"o_d\"].iloc[-1])\n\n self.nodes_pos = nodes_pos\n self.nodes_i_d = nodes_i_d\n self.nodes_o_d = nodes_o_d\n\n shaft_elements_length = list(df_shaft.groupby(\"n_l\")[\"L\"].min())\n self.shaft_elements_length = shaft_elements_length\n\n self.nodes = list(range(len(self.nodes_pos)))\n self.L = nodes_pos[-1]\n\n # rotor mass can also be calculated with self.M()[::4, ::4].sum()\n self.m_disks = np.sum([disk.m for disk in self.disk_elements])\n self.m_shaft = np.sum([sh_el.m for sh_el in self.shaft_elements])\n self.m = self.m_disks + self.m_shaft\n\n # rotor center of mass and total inertia\n CG_sh = np.sum(\n [(sh.m * sh.axial_cg_pos) / self.m for sh in self.shaft_elements]\n )\n CG_dsk = np.sum(\n [disk.m * nodes_pos[disk.n] / self.m for disk in self.disk_elements]\n )\n self.CG = CG_sh + CG_dsk\n\n Ip_sh = np.sum([sh.Im for sh in self.shaft_elements])\n Ip_dsk = np.sum([disk.Ip for disk in self.disk_elements])\n self.Ip = Ip_sh + Ip_dsk\n\n # values for evalues and evectors will be calculated by self.run_modal\n self.evalues = None\n self.evectors = None\n self.wn = None\n self.wd = None\n self.lti = None\n\n self._v0 = None # used to call eigs\n\n # number of dofs\n self.ndof = (\n 4 * max([el.n for el in shaft_elements])\n + 8\n + 2 * len([el for el in point_mass_elements])\n )\n\n elm_no_shaft_id = {\n elm\n for elm in self.elements\n if pd.isna(df.loc[df.tag == elm.tag, \"shaft_number\"]).all()\n }\n for elm in cycle(self.elements):\n if elm_no_shaft_id:\n if elm in elm_no_shaft_id:\n shnum_l = df.loc[\n (df.n_l == elm.n) & (df.tag != elm.tag), \"shaft_number\"\n ]\n shnum_r = df.loc[\n (df.n_r == elm.n) & (df.tag != elm.tag), \"shaft_number\"\n ]\n if len(shnum_l) == 0 and len(shnum_r) == 0:\n shnum_l = df.loc[\n (df.n_link == elm.n) & (df.tag != elm.tag), \"shaft_number\"\n ]\n shnum_r = shnum_l\n if len(shnum_l):\n df.loc[df.tag == elm.tag, \"shaft_number\"] = shnum_l.values[0]\n elm_no_shaft_id.discard(elm)\n elif len(shnum_r):\n df.loc[df.tag == elm.tag, \"shaft_number\"] = shnum_r.values[0]\n elm_no_shaft_id.discard(elm)\n else:\n break\n\n df_disks[\"shaft_number\"] = df.loc[\n (df.type == \"DiskElement\"), \"shaft_number\"\n ].values\n df_bearings[\"shaft_number\"] = df.loc[\n (df.type == 
\"BearingElement\"), \"shaft_number\"\n ].values\n df_seals[\"shaft_number\"] = df.loc[\n (df.type == \"SealElement\"), \"shaft_number\"\n ].values\n df_point_mass[\"shaft_number\"] = df.loc[\n (df.type == \"PointMass\"), \"shaft_number\"\n ].values\n\n self.df_disks = df_disks\n self.df_bearings = df_bearings\n self.df_shaft = df_shaft\n self.df_point_mass = df_point_mass\n self.df_seals = df_seals\n\n # global indexes for dofs\n n_last = self.shaft_elements[-1].n\n for elm in self.elements:\n dof_mapping = elm.dof_mapping()\n global_dof_mapping = {}\n for k, v in dof_mapping.items():\n dof_letter, dof_number = k.split(\"_\")\n global_dof_mapping[dof_letter + \"_\" + str(int(dof_number) + elm.n)] = v\n\n if elm.n <= n_last + 1:\n for k, v in global_dof_mapping.items():\n global_dof_mapping[k] = 4 * elm.n + v\n else:\n for k, v in global_dof_mapping.items():\n global_dof_mapping[k] = 2 * n_last + 2 * elm.n + 4 + v\n\n if hasattr(elm, \"n_link\") and elm.n_link is not None:\n if elm.n_link <= n_last + 1:\n global_dof_mapping[f\"x_{elm.n_link}\"] = 4 * elm.n_link\n global_dof_mapping[f\"y_{elm.n_link}\"] = 4 * elm.n_link + 1\n else:\n global_dof_mapping[f\"x_{elm.n_link}\"] = (\n 2 * n_last + 2 * elm.n_link + 4\n )\n global_dof_mapping[f\"y_{elm.n_link}\"] = (\n 2 * n_last + 2 * elm.n_link + 5\n )\n\n dof_tuple = namedtuple(\"GlobalIndex\", global_dof_mapping)\n elm.dof_global_index = dof_tuple(**global_dof_mapping)\n df.at[\n df.loc[df.tag == elm.tag].index[0], \"dof_global_index\"\n ] = elm.dof_global_index\n\n # values for static analysis will be calculated by def static\n self.Vx = None\n self.Bm = None\n self.disp_y = None\n\n # define positions for disks\n for disk in disk_elements:\n z_pos = nodes_pos[disk.n]\n y_pos = nodes_o_d[disk.n]\n df.loc[df.tag == disk.tag, \"nodes_pos_l\"] = z_pos\n df.loc[df.tag == disk.tag, \"nodes_pos_r\"] = z_pos\n df.loc[df.tag == disk.tag, \"y_pos\"] = y_pos\n\n # define positions for bearings\n # check if there are bearings without location\n bearings_no_zloc = {\n b\n for b in bearing_elements\n if pd.isna(df.loc[df.tag == b.tag, \"nodes_pos_l\"]).all()\n }\n\n # cycle while there are bearings without a z location\n for b in cycle(self.bearing_elements):\n if bearings_no_zloc:\n if b in bearings_no_zloc:\n # first check if b.n is on list, if not, check for n_link\n node_l = df.loc[(df.n_l == b.n) & (df.tag != b.tag), \"nodes_pos_l\"]\n node_r = df.loc[(df.n_r == b.n) & (df.tag != b.tag), \"nodes_pos_r\"]\n if len(node_l) == 0 and len(node_r) == 0:\n node_l = df.loc[\n (df.n_link == b.n) & (df.tag != b.tag), \"nodes_pos_l\"\n ]\n node_r = node_l\n if len(node_l):\n df.loc[df.tag == b.tag, \"nodes_pos_l\"] = node_l.values[0]\n df.loc[df.tag == b.tag, \"nodes_pos_r\"] = node_l.values[0]\n bearings_no_zloc.discard(b)\n elif len(node_r):\n df.loc[df.tag == b.tag, \"nodes_pos_l\"] = node_r.values[0]\n df.loc[df.tag == b.tag, \"nodes_pos_r\"] = node_r.values[0]\n bearings_no_zloc.discard(b)\n else:\n break\n\n dfb = df[df.type == \"BearingElement\"]\n z_positions = [pos for pos in dfb[\"nodes_pos_l\"]]\n z_positions = list(dict.fromkeys(z_positions))\n mean_od = np.mean(nodes_o_d)\n for z_pos in dfb[\"nodes_pos_l\"]:\n dfb_z_pos = dfb[dfb.nodes_pos_l == z_pos]\n dfb_z_pos = dfb_z_pos.sort_values(by=\"n_l\")\n for n, t, nlink in zip(dfb_z_pos.n, dfb_z_pos.tag, dfb_z_pos.n_link):\n if n in self.nodes:\n if z_pos == df_shaft[\"nodes_pos_l\"].iloc[0]:\n y_pos = (np.max(df_shaft[\"odl\"][df_shaft.n_l == n].values)) / 2\n elif z_pos == 
df_shaft[\"nodes_pos_r\"].iloc[-1]:\n y_pos = (np.max(df_shaft[\"odr\"][df_shaft.n_r == n].values)) / 2\n else:\n if not len(df_shaft[\"odl\"][df_shaft._n == n].values):\n y_pos = (\n np.max(df_shaft[\"odr\"][df_shaft._n == n - 1].values)\n ) / 2\n elif not len(df_shaft[\"odr\"][df_shaft._n == n - 1].values):\n y_pos = (\n np.max(df_shaft[\"odl\"][df_shaft._n == n].values)\n ) / 2\n else:\n y_pos = (\n np.max(\n [\n np.max(\n df_shaft[\"odl\"][df_shaft._n == n].values\n ),\n np.max(\n df_shaft[\"odr\"][df_shaft._n == n - 1].values\n ),\n ]\n )\n / 2\n )\n else:\n y_pos += 2 * mean_od * df[\"scale_factor\"][df.tag == t].values[0]\n\n if nlink in self.nodes:\n if z_pos == df_shaft[\"nodes_pos_l\"].iloc[0]:\n y_pos_sup = (\n np.min(df_shaft[\"idl\"][df_shaft.n_l == nlink].values)\n ) / 2\n elif z_pos == df_shaft[\"nodes_pos_r\"].iloc[-1]:\n y_pos_sup = (\n np.min(df_shaft[\"idr\"][df_shaft.n_r == nlink].values)\n ) / 2\n else:\n if not len(df_shaft[\"idl\"][df_shaft._n == nlink].values):\n y_pos_sup = (\n np.min(df_shaft[\"idr\"][df_shaft._n == nlink - 1].values)\n ) / 2\n elif not len(df_shaft[\"idr\"][df_shaft._n == nlink - 1].values):\n y_pos_sup = (\n np.min(df_shaft[\"idl\"][df_shaft._n == nlink].values)\n ) / 2\n else:\n y_pos_sup = (\n np.min(\n [\n np.min(\n df_shaft[\"idl\"][df_shaft._n == nlink].values\n ),\n np.min(\n df_shaft[\"idr\"][\n df_shaft._n == nlink - 1\n ].values\n ),\n ]\n )\n / 2\n )\n else:\n y_pos_sup = (\n y_pos + 2 * mean_od * df[\"scale_factor\"][df.tag == t].values[0]\n )\n\n df.loc[df.tag == t, \"y_pos\"] = y_pos\n df.loc[df.tag == t, \"y_pos_sup\"] = y_pos_sup\n\n # define position for point mass elements\n dfb = df[df.type == \"BearingElement\"]\n for p in point_mass_elements:\n z_pos = dfb[dfb.n_l == p.n][\"nodes_pos_l\"].values[0]\n y_pos = dfb[dfb.n_l == p.n][\"y_pos\"].values[0]\n df.loc[df.tag == p.tag, \"nodes_pos_l\"] = z_pos\n df.loc[df.tag == p.tag, \"nodes_pos_r\"] = z_pos\n df.loc[df.tag == p.tag, \"y_pos\"] = y_pos\n\n self.df = df\n\n\ndef rotor_example():\n \"\"\"This function returns an instance of a simple rotor with\n two shaft elements, one disk and two simple bearings.\n The purpose of this is to make available a simple model\n so that doctest can be written using this.\n\n Parameters\n ----------\n\n Returns\n -------\n An instance of a rotor object.\n\n Examples\n --------\n >>> rotor = rotor_example()\n >>> modal = rotor.run_modal(speed=0)\n >>> np.round(modal.wd[:4])\n array([ 92., 96., 275., 297.])\n \"\"\"\n # Rotor without damping with 6 shaft elements 2 disks and 2 bearings\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n stfx = 1e6\n stfy = 0.8e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=0)\n\n return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n\ndef coaxrotor_example():\n \"\"\"This function returns an instance of a simple rotor with\n two shafts, four disk and four bearings.\n The purpose of this is to make available a simple model for co-axial rotors\n so that doctest can be written using this.\n\n Parameters\n ----------\n\n Returns\n -------\n An instance of a rotor object.\n\n 
Examples\n --------\n >>> rotor = coaxrotor_example()\n >>> modal = rotor.run_modal(speed=0)\n >>> np.round(modal.wd[:4])\n array([39., 39., 99., 99.])\n \"\"\"\n i_d = 0\n o_d = 0.05\n n = 10\n L = [0.25 for _ in range(n)]\n\n axial_shaft = [ShaftElement(l, i_d, o_d, material=steel) for l in L]\n\n i_d = 0.25\n o_d = 0.30\n n = 6\n L = [0.25 for _ in range(n)]\n\n coaxial_shaft = [ShaftElement(l, i_d, o_d, material=steel) for l in L]\n\n disk0 = DiskElement.from_geometry(\n n=1, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=9, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk2 = DiskElement.from_geometry(\n n=13, material=steel, width=0.07, i_d=0.20, o_d=0.48\n )\n disk3 = DiskElement.from_geometry(\n n=15, material=steel, width=0.07, i_d=0.20, o_d=0.48\n )\n\n shaft = [axial_shaft, coaxial_shaft]\n disks = [disk0, disk1, disk2, disk3]\n\n stfx = 1e6\n stfy = 1e6\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=0)\n bearing1 = BearingElement(10, kxx=stfx, kyy=stfy, cxx=0)\n bearing2 = BearingElement(11, kxx=stfx, kyy=stfy, cxx=0)\n bearing3 = BearingElement(8, n_link=17, kxx=stfx, kyy=stfy, cxx=0)\n bearings = [bearing0, bearing1, bearing2, bearing3]\n\n return CoAxialRotor(shaft, disks, bearings)\n\n\ndef MAC(u, v):\n \"\"\"MAC - Modal Assurance Criterion\n\n MAC for a single pair of vectors.\n The Modal Assurance Criterion (MAC) analysis is used to determine\n the similarity of two mode shapes.\n\n Parameters\n ----------\n u : array\n complex modal vector\n v : array\n complex modal vector\n\n Returns\n -------\n MAC from 'u' and 'v'\n \"\"\"\n H = lambda a: a.T.conj()\n return np.absolute((H(u) @ v) ** 2 / ((H(u) @ u) * (H(v) @ v)))\n\n\ndef MAC_modes(U, V, n=None, plot=True):\n \"\"\"MAC - Modal Assurance Criterion\n\n MAC for multiple vectors\n The Modal Assurance Criterion (MAC) analysis is used to determine\n the similarity of two mode shapes.\n\n Parameters\n ----------\n U : matrix\n complex modal matrix\n V : matrix\n complex modal matrix\n n : int\n number of vectors to be analyzed\n plot : bool\n if True, returns a plot\n if False, returns the macs values\n\n Returns\n -------\n The macs values from 'U' and 'V'\n \"\"\"\n # n is the number of modes to be evaluated\n if n is None:\n n = U.shape[1]\n macs = np.zeros((n, n))\n for u in enumerate(U.T[:n]):\n for v in enumerate(V.T[:n]):\n macs[u[0], v[0]] = MAC(u[1], v[1])\n\n if not plot:\n return macs\n\n xpos, ypos = np.meshgrid(range(n), range(n))\n xpos, ypos = 0.5 + xpos.flatten(), 0.5 + ypos.flatten()\n zpos = np.zeros_like(xpos)\n dx = 0.75 * np.ones_like(xpos)\n dy = 0.75 * np.ones_like(xpos)\n dz = macs.T.flatten()\n\n fig = plt.figure(figsize=(12, 8))\n # fig.suptitle('MAC - %s vs %s' % (U.name, V.name), fontsize=12)\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.bar3d(\n xpos, ypos, zpos, dx, dy, dz, color=plt.cm.viridis(dz), alpha=0.7, zsort=\"max\"\n )\n ax.set_xticks(range(1, n + 1))\n ax.set_yticks(range(1, n + 1))\n ax.set_zlim(0, 1)\n # ax.set_xlabel('%s modes' % U.name)\n # ax.set_ylabel('%s modes' % V.name)\n\n sm = plt.cm.ScalarMappable(cmap=plt.cm.viridis, norm=plt.Normalize(vmin=0, vmax=1))\n # fake up the array of the scalar mappable\n sm._A = []\n cbar = fig.colorbar(sm, shrink=0.5, aspect=10)\n cbar.set_label(\"MAC\")\n\n return macs\n"
]
| [
[
"numpy.ones_like",
"scipy.signal.lsim",
"numpy.exp",
"numpy.mean",
"numpy.min",
"scipy.signal.lti",
"pandas.concat",
"numpy.imag",
"numpy.logspace",
"scipy.linalg.solve",
"numpy.max",
"numpy.zeros_like",
"numpy.angle",
"numpy.eye",
"scipy.linalg.eig",
"numpy.transpose",
"numpy.sqrt",
"numpy.append",
"matplotlib.pyplot.Normalize",
"numpy.around",
"matplotlib.pyplot.gca",
"numpy.log10",
"numpy.array",
"numpy.zeros",
"numpy.lexsort",
"numpy.real",
"numpy.ix_",
"matplotlib.pyplot.figure",
"scipy.linalg.inv",
"matplotlib.pyplot.style.use",
"numpy.absolute",
"numpy.hstack",
"matplotlib.rcParams.copy",
"pandas.isna",
"matplotlib.pyplot.cm.viridis",
"numpy.sum",
"scipy.sparse.linalg.eigs",
"numpy.linspace",
"numpy.diag"
]
]
|
hexiang-hu/cs231n-practice | [
"42c9ea3ff1fafd8dce7838c9afbb06a071292bfd"
]
| [
"hw1/cs231n/classifiers/k_nearest_neighbor.py"
]
| [
"import numpy as np\n\nclass KNearestNeighbor:\n \"\"\" a kNN classifier with L2 distance \"\"\"\n\n def __init__(self):\n pass\n\n def train(self, X, y):\n \"\"\"\n Train the classifier. For k-nearest neighbors this is just \n memorizing the training data.\n\n Input:\n X - A num_train x dimension array where each row is a training point.\n y - A vector of length num_train, where y[i] is the label for X[i, :]\n \"\"\"\n self.X_train = X\n self.y_train = y\n \n def predict(self, X, k=1, num_loops=0):\n \"\"\"\n Predict labels for test data using this classifier.\n\n Input:\n X - A num_test x dimension array where each row is a test point.\n k - The number of nearest neighbors that vote for predicted label\n num_loops - Determines which method to use to compute distances\n between training points and test points.\n\n Output:\n y - A vector of length num_test, where y[i] is the predicted label for the\n test point X[i, :].\n \"\"\"\n if num_loops == 0:\n dists = self.compute_distances_no_loops(X)\n elif num_loops == 1:\n dists = self.compute_distances_one_loop(X)\n elif num_loops == 2:\n dists = self.compute_distances_two_loops(X)\n else:\n raise ValueError('Invalid value %d for num_loops' % num_loops)\n\n return self.predict_labels(dists, k=k)\n\n def compute_distances_two_loops(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using a nested loop over both the training data and the \n test data.\n\n Input:\n X - An num_test x dimension array where each row is a test point.\n\n Output:\n dists - A num_test x num_train array where dists[i, j] is the distance\n between the ith test point and the jth training point.\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in xrange(num_test):\n for j in xrange(num_train):\n #####################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and the jth #\n # training point, and store the result in dists[i, j] #\n #####################################################################\n \n # L2 distance between ith testing image and jth training image \n dists[i, j] = np.sum( (self.X_train[j,:] - X[i,:] )**2 )\n\n return dists\n\n def compute_distances_one_loop(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using a single loop over the test data.\n\n Input / Output: Same as compute_distances_two_loops\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in xrange(num_test):\n #######################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and all training #\n # points, and store the result in dists[i, :]. 
#\n #######################################################################\n \n # dists[i, :] = np.sum((self.X_train - X[i, :])**2, axis=1 )\n train_2 = np.sum( (self.X_train)**2, axis=1 ).T\n test_2 = np.tile( np.sum( (X[i,:])**2 ), [1, num_train])\n test_train = X[i,:].dot(self.X_train.T)\n \n dists[i,:] = train_2 + test_2 - 2 * test_train\n\n return dists\n\n def compute_distances_no_loops(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train)) \n #########################################################################\n # TODO: #\n # Compute the l2 distance between all test points and all training #\n # points without using any explicit loops, and store the result in #\n # dists. #\n # HINT: Try to formulate the l2 distance using matrix multiplication #\n # and two broadcast sums. #\n #########################################################################\n \n # expand the formula and calculate each term respectively\n train_2 = np.tile( np.sum( (self.X_train)**2, axis=1), [num_test, 1])\n test_2 = np.tile( np.sum( (X)**2, axis=1), [num_train, 1]).T\n test_train = X.dot(self.X_train.T)\n\n dists = train_2 + test_2 - 2*test_train\n return dists\n\n def predict_labels(self, dists, k=1):\n \"\"\"\n Given a matrix of distances between test points and training points,\n predict a label for each test point.\n\n Input:\n dists - A num_test x num_train array where dists[i, j] gives the distance\n between the ith test point and the jth training point.\n\n Output:\n y - A vector of length num_test where y[i] is the predicted label for the\n ith test point.\n \"\"\"\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in xrange(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n #########################################################################\n # TODO: #\n # Use the distance matrix to find the k nearest neighbors of the ith #\n # training point, and use self.y_train to find the labels of these #\n # neighbors. Store these labels in closest_y. #\n # Hint: Look up the function numpy.argsort. #\n #########################################################################\n closest_idx = np.argsort(dists[i, :])[:k].tolist()\n closest_y = self.y_train[closest_idx]\n #########################################################################\n # TODO: #\n # Now that you have found the labels of the k nearest neighbors, you #\n # need to find the most common label in the list closest_y of labels. #\n # Store this label in y_pred[i]. Break ties by choosing the smaller #\n # label. #\n #########################################################################\n \n # count the frequency of those closest labels\n counts = np.bincount(closest_y)\n\n # return the most frequent item as result\n y_pred[i] = np.argmax(counts)\n return y_pred\n\n"
]
| [
[
"numpy.bincount",
"numpy.zeros",
"numpy.sum",
"numpy.argmax",
"numpy.argsort"
]
]
|
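The no-loop distance computation in the k_nearest_neighbor.py record above relies on expanding the squared L2 distance as ||t||^2 + ||x||^2 - 2 t.x. A minimal stand-alone sketch of the same trick, using NumPy broadcasting in place of the record's explicit np.tile calls (that substitution is mine), with a brute-force check:

import numpy as np

def l2_dists_no_loops(X_test, X_train):
    # squared-norm terms shaped for broadcasting:
    # (num_test, 1) + (1, num_train) -> (num_test, num_train)
    test_sq = np.sum(X_test ** 2, axis=1)[:, np.newaxis]
    train_sq = np.sum(X_train ** 2, axis=1)[np.newaxis, :]
    cross = X_test.dot(X_train.T)           # (num_test, num_train)
    return test_sq + train_sq - 2 * cross   # squared L2 distances, as in the record

# brute-force sanity check against an explicit double loop
rng = np.random.default_rng(0)
X_tr = rng.normal(size=(5, 3))
X_te = rng.normal(size=(2, 3))
ref = np.array([[np.sum((tr - te) ** 2) for tr in X_tr] for te in X_te])
assert np.allclose(l2_dists_no_loops(X_te, X_tr), ref)

Broadcasting a column of test norms against a row of train norms builds the full (num_test, num_train) matrix without materialising tiled copies.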
remiadon/scikit-mine | [
"769d7d5ea0dda5d4adea33236733f4ce1ea0c815"
]
| [
"skmine/utils.py"
]
| [
"\"\"\"\nutils functions\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sortedcontainers import SortedList\n\n\ndef _check_random_state(random_state):\n if random_state is None or isinstance(random_state, int):\n random_state = np.random.RandomState(random_state)\n elif not isinstance(random_state, np.random.RandomState):\n raise TypeError(\"random_state should be an int or a RandomState instance\")\n\n return random_state\n\n\ndef _check_min_supp(min_supp, accept_absolute=True):\n if isinstance(min_supp, int):\n if not accept_absolute:\n raise ValueError(\n \"Absolute support is prohibited, please provide a float value between 0 and 1\"\n )\n if min_supp < 1:\n raise ValueError(\"Minimum support must be strictly positive\")\n elif isinstance(min_supp, float):\n if min_supp < 0 or min_supp > 1:\n raise ValueError(\"Minimum support must be between 0 and 1\")\n else:\n raise TypeError(\"Mimimum support must be of type int or float\")\n return min_supp\n\n\ndef _check_growth_rate(gr):\n if not gr > 1:\n raise ValueError(\"growth ratio should be greater than 1\")\n return gr\n\n\ndef filter_maximal(itemsets):\n \"\"\"filter maximal itemsets from a set of itemsets\n\n Parameters\n ----------\n itemsets: Iterator[frozenset]\n a set of itemsets\n\n Returns\n -------\n SortedList\n \"\"\"\n maximals = SortedList(key=len)\n itemsets = sorted(itemsets, key=len, reverse=True)\n for iset in itemsets:\n gts = maximals.irange(iset)\n # is there a superset amongst bigger itemsets ?\n if not any(map(lambda e: e > iset, gts)):\n maximals.add(iset) # O(log(len(maximals)))\n\n return maximals\n\n\ndef filter_minimal(itemsets):\n \"\"\"filter minimal itemsets from a set of itemsets\n\n Parameters\n ----------\n itemsets: Iterator[frozenset]\n a set of itemsets\n\n Returns\n -------\n SortedList\n \"\"\"\n minimals = SortedList(key=len)\n itemsets = sorted(itemsets, key=len)\n for iset in itemsets:\n lts = minimals.irange(None, iset)\n # is there a subset amongst the smaller itemsets ?\n if not any(map(lambda e: e < iset, lts)):\n minimals.add(iset)\n\n return minimals\n\n\ndef supervised_to_unsupervised(D, y):\n \"\"\"\n for sklearn compatibility, eg. sklearn.multiclass.OneVSRest\n\n Parameters\n ----------\n D: pd.DataFrame\n input transactional dataset\n\n y: np.ndarray of shape (n_samples,)\n corresponding labels\n \"\"\"\n mask = np.where(y.reshape(-1))[0]\n D = D.iloc[mask]\n\n return D\n\n\ndef _check_D_sklearn(D):\n if object in D.dtypes.values: # SKLEARN : check_dtype_object\n raise TypeError(\"argument must be a string or a number\")\n\n if D.shape[1] == 0: # SKLEARN : check_empty_data_messages\n raise ValueError(\"Empty data\")\n\n pd.options.mode.use_inf_as_na = True\n if D.isnull().values.any():\n raise ValueError(\"esimator does not check for NaN and inf\")\n pd.options.mode.use_inf_as_na = False\n\n\ndef _check_D(D):\n if isinstance(D, pd.DataFrame):\n D = D.reset_index(drop=True) # positional indexing\n elif isinstance(D, np.ndarray):\n D = pd.DataFrame(D)\n else:\n raise TypeError(\"D should be an instance of np.ndarray or pd.DataFrame\")\n\n _check_D_sklearn(D)\n\n return D\n\n\ndef _check_y(y):\n if not isinstance(y, (pd.Series, np.ndarray)):\n raise TypeError(\"y should be an instance of np.ndarray or pd.Series\")\n\n # TODO : pd.Categorical\n return y\n\n\ndef _check_D_y(D, y=None):\n D = _check_D(D)\n if y is not None:\n y = _check_y(y)\n return D, y\n"
]
| [
[
"pandas.DataFrame",
"numpy.random.RandomState"
]
]
|
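filter_maximal in the skmine/utils.py record above keeps an itemset only if no already-kept (hence no longer) itemset is a strict superset of it; filter_minimal mirrors this with subsets. A self-contained sketch of the maximal case using plain frozensets and a list in place of sortedcontainers.SortedList (that simplification is mine, for illustration only):

def filter_maximal_simple(itemsets):
    """Keep only itemsets that are not strict subsets of another itemset."""
    kept = []
    # scan longest first: any superset of the current candidate is already in `kept`
    for iset in sorted(itemsets, key=len, reverse=True):
        if not any(iset < other for other in kept):   # `<` is strict subset for sets
            kept.append(iset)
    return kept

candidates = [frozenset('ab'), frozenset('abc'), frozenset('bc'), frozenset('d')]
print(filter_maximal_simple(candidates))   # keeps {'a','b','c'} and {'d'}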
YunchuZhang/Visually-Grounded-Library-of-Behaviors-for-Generalizing-Manipulation-Across-Objects-Configurations- | [
"896afda942dfc04e4aaad2ee751c32df1eb17913"
]
| [
"pytorch_disco/model.py"
]
| [
"import os\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport copy\n# from torch.utils.tensorboard import SummaryWriter\nfrom tensorboardX import SummaryWriter\nfrom backend import saverloader, inputs\nfrom backend.inputs import MuJoCoOfflineData\n# from backend.double_pool import DoublePool\n#from torchvision import datasets, transforms\nimport utils\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import make_grid\n\nnp.set_printoptions(precision=5)\nEPS = 1e-6\nnp.random.seed(0)\nMAX_QUEUE = 10 #how many items before the summaryWriter flush\n\n\nclass Model(object):\n def __init__(self, config):\n\n print('------ CREATING NEW MODEL ------')\n print(config.run_full_name)\n self.checkpoint_dir = config.checkpoint_dir\n self.log_dir = config.log_dir\n self.config=config\n self.lr = config.lr\n self.all_inputs = inputs.get_inputs(config)\n self.big_list_of_results = list() # NOTE: this is temporary\n\n use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n #self.device = torch.device(\"cuda\")\n self.moc_init_flag = False\n self.tensor_in_clusters = None\n self.nn_results_dir = config.vis_dir#os.path.join(\"results_dir\", config.name)\n # only generate the directory if necessary, don't clutter stuff\n \n data_root_dir = utils.utils.get_data_dir()\n for set_name in config.data_paths:\n data_path = config.data_paths[set_name]\n if not data_path.startswith(\"/\"):\n data_path = os.path.join(data_root_dir, data_path)\n config.data_paths[set_name] = data_path\n\n if config.do_validation:\n if not os.path.exists(self.nn_results_dir):\n os.makedirs(self.nn_results_dir)\n\n\n if config.feat_init is not None:\n model_dir = config.feat_init\n self.extracted_tensor_save_dir = os.path.join(\"extracted_tensor\", model_dir, config.run_full_name)\n # again only create directory if necessary\n if config.do_generate_data:\n if not os.path.exists(self.extracted_tensor_save_dir):\n os.makedirs(self.extracted_tensor_save_dir)\n\n def save_local_variables(self):\n return dict()\n\n def infer(self):\n print('nothing to infer!')\n \n @staticmethod\n def plot_top_k_retrieval(records_list, sorted_idxs, query_records_list=None):\n # NOTE: Hard code alert but I think it should be fine because img size will not change\n vis = list()\n for i, s in enumerate(sorted_idxs):\n minivis = list()\n # get image of the query and its top 10 neighbours\n query_record = query_records_list[i]\n data = np.load(query_record, allow_pickle=True).item()\n\n qref_img = data['rgb_camXs'][-1]\n minivis.append(qref_img)\n keys_record = [records_list[k] for k in s]\n for j, k in enumerate(keys_record):\n # get the filename first, load it and then get the ref images for it,\n # TODO: this all can be precomputed once during init\n data = np.load(k, allow_pickle=True).item()\n ref_img = data['rgb_camXs'][-1]\n minivis.append(ref_img)\n vis.append(minivis)\n return vis\n\n def compute_nearest_neighbours_ed(self):\n # compute nearest neighbours using euclidean distances\n raise NotImplementedError\n\n def compute_nearest_neighbours_dp(self, val_results_list, val_cluster_id_list, re_results_list=None, re_cluster_id_list=None, vis_top_k=10):\n \"\"\"\n val_results_list: [feature_dim] * num_samples\n success_rate_list: np.array(#class): if select the top, then it counts\n \"\"\"\n records_list = list()\n object_tensors = list()\n for rr in val_results_list:\n if \"record_name\" not in rr:\n 
import ipdb; ipdb.set_trace()\n rec = rr['record_name']\n obj_tensor = rr['object_tensor']\n # each of them is a 2 element list\n for re, objt in zip(rec, obj_tensor):\n records_list.append(re)\n object_tensors.append(objt)\n\n # do the dot product to compute the nearest neighbours\n resized_object_tensors = torch.stack(object_tensors, dim=0)\n\n if len(resized_object_tensors.shape) == 5:\n N, C, D, H, W = list(resized_object_tensors.shape)\n\n else:\n N, C = list(resized_object_tensors.shape)\n D, H, W = 1, 1, 1\n emb_vectors = resized_object_tensors.view(N, C, -1)\n\n emb_vectors = emb_vectors.permute(0, 2, 1)\n\n if re_results_list is not None:\n re_records_list = list()\n re_object_tensors = list()\n for rr in re_results_list:\n rec = rr['record_name']\n re_obj_tensor = rr['object_tensor']\n # each of them is a 2 element list\n for re, objt in zip(rec, re_obj_tensor):\n re_records_list.append(re)\n re_object_tensors.append(objt)\n\n # do the dot product to compute the nearest neighbours\n resized_re_object_tensors = torch.stack(re_object_tensors, dim=0)\n if len(resized_re_object_tensors.shape) == 5:\n N, C, D, H, W = list(resized_re_object_tensors.shape)\n else:\n N, C = list(resized_re_object_tensors.shape)\n re_emb_vectors = resized_re_object_tensors.view(N, C, -1)\n re_emb_vectors = re_emb_vectors.permute(0, 2, 1)\n cluster_id_list = re_cluster_id_list\n query_records_list = copy.deepcopy(records_list)\n records_list = re_records_list\n\n else:\n re_emb_vectors = emb_vectors\n cluster_id_list = val_cluster_id_list\n query_records_list = records_list\n\n\n # TODO: Naive version now for each of the emb_vectors compute its distance from every emb_vector\n dists = list()\n for e in range(len(emb_vectors)):\n curr_emb = emb_vectors[e]\n curr_dist = list()\n for f in range(len(re_emb_vectors)):\n if re_results_list is None and e == f:\n curr_dist.append(1)\n continue\n other_emb = re_emb_vectors[f]\n\n dot = curr_emb * other_emb\n dot = dot.sum(axis=1)\n assert len(dot) == (D*H*W)\n avg_dot = dot.sum() / len(dot)\n curr_dist.append(avg_dot)\n dists.append(curr_dist)\n\n dists = np.stack(dists, axis=0)\n # now that you have distance of current from every one else, get the top 10 nearest neighbours\n sort_idxs = np.argsort(-dists, axis=1)[:, :vis_top_k]\n \n # plot the stuff, for now I am doing it in matplotlib, it is just easier for me\n\n\n cluster_ids = []\n batch_size = sort_idxs.shape[0]\n for bid in range(batch_size):\n sort_idxs_irow = sort_idxs[bid, :]\n cluster_id_irows = [cluster_id_list[id_] for id_ in sort_idxs_irow]\n cluster_ids.append(cluster_id_irows)\n cluster_ids = np.array(cluster_ids)\n\n\n if re_results_list is None: # test data on test data\n recall_1 = np.mean(cluster_ids[:,0] == cluster_ids[:,1])\n # ndata = cluster_ids.shape[0]\n # for data_id in range(ndata):\n # success_rate = success_rate_list[data_id]\n # cluster_ids[data_id, 0]\n #\n # import ipdb; ipdb.set_trace()\n #\n # print(\"hello\")\n\n\n else:\n recall_1 = np.mean(np.array(val_cluster_id_list) == cluster_ids[:,0])\n\n vis = self.plot_top_k_retrieval(records_list, sort_idxs, query_records_list=query_records_list)\n\n\n return vis, recall_1\n\n def get_features(self, dataloader, summ_writer, step, ndata=100000):\n results_list = list()\n cluster_id_list = list()\n for i, feed in enumerate(dataloader):\n # move everything to cuda\n feed_cuda = feed\n for k in feed:\n try:\n feed_cuda[k] = feed[k].cuda()\n except:\n # some of them are not tensor\n feed_cuda[k] = feed[k]\n \n feed_cuda['writer'] = 
summ_writer \n feed_cuda['global_step'] = step\n feed_cuda['set_num'] = 'val' #self.config.set_nums[data_name]\n feed_cuda['set_name'] = 1 #data_name\n feed_cuda['record'] = feed['record']\n\n # now I am ready for the forward pass, I want it to return to me\n # the 3d tensor which belongs to the object\n with torch.no_grad():\n loss, results = self.model(feed_cuda)\n # for now since I am only debugging stuff\n # a list of dictionary\n # \"object_tensor\": B x C x H x W x D\n # \"record_name\": list of strings\n bsize = len(feed_cuda[\"cluster_id\"][0])\n cluster_id_list += [feed_cuda[\"cluster_id\"][0][bid] for bid in range(bsize)]\n results_list.append(results)\n if len(cluster_id_list) >= ndata:\n break\n\n return results_list, cluster_id_list, feed\n\n def get_data(self, dataloader, summ_writer, step, ndata=100000):\n data_list = list()\n total_num_data = 0\n for i, feed in enumerate(dataloader):\n # move everything to cuda\n feed_cuda = feed\n for k in feed:\n try:\n feed_cuda[k] = feed[k].cuda()\n except:\n # some of them are not tensor\n feed_cuda[k] = feed[k]\n\n feed_cuda['writer'] = summ_writer\n feed_cuda['global_step'] = step\n feed_cuda['set_num'] = 1 #\"val\" #self.config.set_nums[data_name]\n feed_cuda['set_name'] = \"val\" #data_name\n feed_cuda['record'] = feed['record']\n\n # now I am ready for the forward pass, I want it to return to me\n # the 3d tensor which belongs to the object\n data_list.append(feed_cuda)\n total_num_data += feed_cuda[\"rgb_camXs\"].shape[0]\n if total_num_data >= ndata:\n break\n\n return data_list\n def get_features_from_data_list(self, data_list, step):\n results_list = list()\n cluster_id_list = list()\n success_rate_list = list()\n\n for feed_cuda in data_list:\n feed_cuda['global_step'] = step\n with torch.no_grad():\n loss, results = self.model(feed_cuda)\n # for now since I am only debugging stuff\n # a list of dictionary\n # \"object_tensor\": B x C x H x W x D\n # \"record_name\": list of strings\n bsize = len(feed_cuda[\"cluster_id\"][0])\n cluster_id_list += [feed_cuda[\"cluster_id\"][0][bid] for bid in range(bsize)]\n results_list.append(results)\n\n success_rate_list.append(feed_cuda[\"success_rates\"])\n\n\n return results_list, cluster_id_list, success_rate_list, feed_cuda\n\n def validate_nn_on_test_from_train(self, step, val_summ_writer=None, train_summ_writer=None):\n print(\"start validate test on train pool\")\n # first put the model in eval mode\n self.model.eval()\n # everytime make the val list empty\n\n if self.train_data_nn is None:\n print(\"make nn pool from train\")\n assert not self.model.training, \"Model should be in eval mode\"\n train_data_path = self.config.data_paths['train']\n # now form the data-loader with the valset path\n train_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=train_data_path,\n plot=False, train=False,\n fix_view=True,\n ndata = 40\n ), batch_size=2, shuffle=True, drop_last=True)\n\n\n self.train_data_nn = self.get_data(train_dataloader, train_summ_writer, step, ndata=40)\n\n train_results_list, train_cluster_id_list, _ = self.get_features_from_data_list(self.train_data_nn, step)\n\n if self.val_data_nn is None:\n print(\"make nn pool from test\")\n test_data_path = self.config.data_paths['test']\n # now form the data-loader with the valset path\n\n val_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=test_data_path,\n plot=False, train=False,\n fix_view=True\n ), batch_size=2, shuffle=True, 
drop_last=True)\n self.val_data_nn = self.get_data(val_dataloader, val_summ_writer, step)\n\n val_results_list, val_cluster_id_list, feed = self.get_features_from_data_list(self.val_data_nn, step)\n vis_nearest_neighbours, recall_1 = self.compute_nearest_neighbours_dp(val_results_list, val_cluster_id_list,\n re_results_list=train_results_list, re_cluster_id_list=train_cluster_id_list)\n\n # now the only thing that remains is plotting this on tensorboard\n # just to satisfy my paranoia I will also save the matplotlib images\n # but after 500 iterations\n if feed['global_step'] % 5000 == 0:\n n_rows = len(vis_nearest_neighbours)\n n_cols = len(vis_nearest_neighbours[0])\n fig_size = 2 * np.asarray([n_rows, n_cols])\n fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=fig_size,\n sharex=True, sharey=True)\n for i in range(n_rows):\n for j in range(n_cols):\n axes[i][j].imshow(vis_nearest_neighbours[i][j])\n\n # save the figure and you are done\n fig.savefig(f'{self.nn_results_dir}/test_train_nn_result_{step}.jpg')\n plt.close()\n\n H, W, C = list(vis_nearest_neighbours[0][1].shape)\n\n # finally add it to tensorboard and you are done !!!\n # #test_data x (#top_retrieve + 1)\n vis_nearest_neighbours = np.stack(vis_nearest_neighbours, axis=0)\n # convert to torch\n vis_nearest = torch.from_numpy(vis_nearest_neighbours).permute(0, 1, 4, 2, 3)\n # resize\n vis_nearest = vis_nearest.view(-1, C, H, W)\n # make the grid\n grid = make_grid(vis_nearest, nrow=11)\n \n # add it to the tensorboard\n feed['writer'].add_scalar('valtrain_nn_recall@1', recall_1, step)\n feed['writer'].add_image('valtrain_nn/imgs', grid, step)\n\n\n\n def validate_on_test(self, step, summ_writer=None):\n # first put the model in eval mode\n self.model.eval()\n # everytime make the val list empty\n #val_results_list = list()\n #cluster_id_list = list()\n print(\"start validate on test\")\n assert not self.model.training, \"Model should be in eval mode\"\n if self.val_data_nn is None:\n test_data_path = self.config.data_paths['test']\n # now form the data-loader with the valset path\n\n val_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=test_data_path,\n plot=False, train=False,\n fix_view=True, num_workers=1\n ), batch_size=2, shuffle=True, drop_last=True)\n\n print(f'Length of val_data is {len(val_dataloader)}')\n self.val_data_nn = self.get_data(val_dataloader, summ_writer, step)\n\n print(\"finish loading data\")\n\n val_results_list, cluster_id_list, success_rate_list, feed = self.get_features_from_data_list(self.val_data_nn, step)\n #val_results_list, cluster_id_list, feed = self.get_features(val_dataloader, summ_writer, step)\n\n # now that you have results think about how can you collate and do nearest neighbor\n vis_nearest_neighbours, recall_1 = self.compute_nearest_neighbours_dp(val_results_list, cluster_id_list)\n\n \n # now the only thing that remains is plotting this on tensorboard\n # just to satisfy my paranoia I will also save the matplotlib images\n # but after 500 iterations\n if feed['global_step'] % 5000 == 0:\n n_rows = len(vis_nearest_neighbours)\n n_cols = len(vis_nearest_neighbours[0])\n fig_size = 2 * np.asarray([n_rows, n_cols])\n fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=fig_size,\n sharex=True, sharey=True)\n for i in range(n_rows):\n for j in range(n_cols):\n axes[i][j].imshow(vis_nearest_neighbours[i][j])\n \n # save the figure and you are done\n fig.savefig(f'{self.nn_results_dir}/nn_result_{step}.jpg')\n 
plt.close()\n\n H, W, C = list(vis_nearest_neighbours[0][1].shape)\n\n # finally add it to tensorboard and you are done !!!\n # #test_data x (#top_retrieve + 1)\n vis_nearest_neighbours = np.stack(vis_nearest_neighbours, axis=0)\n # convert to torch\n vis_nearest = torch.from_numpy(vis_nearest_neighbours).permute(0, 1, 4, 2, 3)\n # resize\n vis_nearest = vis_nearest.view(-1, C, H, W)\n # make the grid\n grid = make_grid(vis_nearest, nrow=11)\n \n # add it to the tensorboard\n feed['writer'].add_scalar('val_nn_recall@1', recall_1, step)\n feed['writer'].add_image('val_nn/imgs', grid, step)\n\n def validate_on_train_using_computed_center(self, step, summ_writer, normalize=True):\n return self.validate_on_test_using_computed_center(step, summ_writer, normalize=True, data_name=\"train\", ndata=40)\n\n def validate_on_test_using_computed_center(self, step, summ_writer, normalize=True, data_name=\"test\", ndata=None):\n print(f\"start validate on {data_name} using computeed center\")\n\n if self.model.is_learned_cluster_centers:\n\n tensor_in_clusters = self.model.get_centers()\n cluster_names = []\n for cluster_id in range(self.model.num_clusters):\n cluster_names.append(self.model.cluster_id_to_name[cluster_id])\n for i in range(self.model.num_clusters, self.model.max_clusters):\n cluster_names.append(f\"not_assigned_{i}\")\n\n else:\n\n tensor_in_cluster = self.compute_cluster_center_from_train(epochs=3, ndata=200, using_success_rate=True)\n tensor_in_clusters = []\n cluster_names = []\n for cluster_name in tensor_in_cluster:\n cluster_names.append(cluster_name)\n tensor_in_clusters.append(tensor_in_cluster[cluster_name])\n tensor_in_clusters = np.stack(tensor_in_clusters, axis=0)\n\n # remove \"not_assigned\" from the list\n for cluster_name in cluster_names:\n if \"not_assigned\" in cluster_name:\n cid = cluster_names.index(cluster_name)\n cluster_names.remove(cluster_name)\n tensor_in_clusters = np.concatenate([tensor_in_clusters[:cid], tensor_in_clusters[cid+1:]], axis=0)\n\n\n\n\n assert not self.model.training, \"Model should be in eval mode\"\n if data_name==\"test\" and self.val_data_nn is None:\n test_data_path = self.config.data_paths[data_name]\n # now form the data-loader with the valset path\n\n val_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=test_data_path,\n plot=False, train=False,\n fix_view=True, num_workers=1,\n ndata=ndata,\n ), batch_size=2, shuffle=False, drop_last=True)\n\n print(f'Length of val_data is {len(val_dataloader)}')\n self.val_data_nn = self.get_data(val_dataloader, summ_writer, step)\n data_nn = self.val_data_nn\n\n elif data_name==\"train\" and self.trainval_data_nn is None:\n test_data_path = self.config.data_paths[data_name]\n # now form the data-loader with the valset path\n\n val_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=test_data_path,\n plot=False, train=False,\n fix_view=True, num_workers=1,\n ndata=ndata,\n ), batch_size=2, shuffle=True, drop_last=True)\n\n print(f'Length of val_data is {len(val_dataloader)}')\n self.trainval_data_nn = self.get_data(val_dataloader, summ_writer, step)\n data_nn = self.trainval_data_nn\n elif data_name == \"test\":\n data_nn = self.val_data_nn\n elif data_name == \"train\":\n data_nn = self.trainval_data_nn\n\n\n #tensor_in_clusters = []\n #cluster_names = []\n #for cluster_name in tensor_in_cluster:\n # cluster_names.append(cluster_name)\n # tensor_in_clusters.append(tensor_in_cluster[cluster_name])\n 
#tensor_in_clusters = np.stack(tensor_in_clusters, axis=0)\n #nc, H, W, D, C = tensor_in_clusters.shape\n\n if len(tensor_in_clusters.shape) == 5:\n nc, H, W, D, C = tensor_in_clusters.shape\n emb_g_flat = torch.from_numpy(np.reshape(tensor_in_clusters, [nc, -1]))\n else:\n emb_g_flat = torch.from_numpy(tensor_in_clusters)\n\n #if normalize:\n emb_g_flat_nonorm = emb_g_flat\n #emb_g_flat = torch.nn.functional.normalize(emb_g_flat, dim=-1)\n\n correct = 0\n correct_nonorm = 0\n nsamples = 0\n success_rate_for_selected_cluster = []\n for feed_cuda in data_nn:\n with torch.no_grad():\n loss, results = self.model(feed_cuda)\n # for now since I am only debugging stuff\n # a list of dictionary\n # \"object_tensor\": B x C x H x W x D\n # \"record_name\": list of strings\n\n if len(results[\"object_tensor\"].shape) == 5: #3d tensor\n object_tensors = results[\"object_tensor\"].permute(0, 2, 3, 4, 1)\n batch_size = object_tensors.shape[0]\n emb_e_flat = torch.reshape(object_tensors, [batch_size, -1])\n else:\n batch_size = results[\"object_tensor\"].shape[0]\n emb_e_flat = results[\"object_tensor\"]\n emb_e_flat_nonorm = emb_e_flat\n #emb_e_flat = torch.nn.functional.normalize(emb_e_flat, dim=-1)\n\n scores = torch.matmul(emb_e_flat, emb_g_flat.T)\n\n # top grasp only\n\n if self.config.top_grasp_only:\n mask = torch.zeros_like(scores)\n mask[:,:12] = 1\n mask[:,23:] = 1\n min_scores = torch.min(scores)\n scores = scores * mask + (1-mask) * min_scores\n\n scores_nonorm = torch.matmul(emb_e_flat_nonorm, emb_g_flat_nonorm.T)\n for batch_id in range(batch_size):\n nsamples += 1\n best_match_id = np.argmax(scores[batch_id].numpy())\n best_match_class = cluster_names[best_match_id]\n\n #if best_match_class is not \"not_assigned\":\n selected_cluster_id = int(best_match_class[1:])\n print(nsamples, \"selected_cluster_id\", selected_cluster_id, feed_cuda[\"success_rates\"][batch_id][selected_cluster_id])\n print(nsamples, feed_cuda[\"success_rates\"][batch_id])\n \n\n\n sr = feed_cuda[\"success_rates\"][batch_id][selected_cluster_id]\n success_rate_for_selected_cluster.append(sr.cpu().numpy())\n\n if best_match_class == feed_cuda[\"cluster_id\"][0][batch_id]:\n correct += 1\n\n best_match_id_nonorm = np.argmax(scores_nonorm[batch_id].numpy())\n best_match_class_nonorm = cluster_names[best_match_id_nonorm]\n\n if best_match_class_nonorm == feed_cuda[\"cluster_id\"][0][batch_id]:\n correct_nonorm += 1\n\n print(\" avg success_rate:\", np.mean(success_rate_for_selected_cluster))\n\n #import ipdb; ipdb.set_trace()\n cluster_mean_acc = correct/nsamples\n cluster_mean_acc_nonorm = correct_nonorm/nsamples\n\n if data_name == \"train\":\n summ_writer.add_scalar('train_nn_cluster_mean', cluster_mean_acc, step)\n summ_writer.add_scalar('train_nn_cluster_mean_nonorm', cluster_mean_acc_nonorm, step)\n summ_writer.add_scalar('train_mean_success_rate', np.mean(success_rate_for_selected_cluster), step)\n else:\n summ_writer.add_scalar('val_nn_cluster_mean', cluster_mean_acc, step)\n summ_writer.add_scalar('val_nn_cluster_mean_nonorm', cluster_mean_acc_nonorm, step)\n summ_writer.add_scalar('val_mean_success_rate', np.mean(success_rate_for_selected_cluster), step)\n return cluster_mean_acc\n\n def compute_cluster_center_from_train(self, epochs=3, ndata=None, using_success_rate=False):\n # first put the model in eval mode\n self.model.eval()\n # everytime make the val list empty\n train_results_list = list()\n assert not self.model.training, \"Model should be in eval mode\"\n\n #test_data_path = 
self.config.data_paths['test']\n train_data_path = self.config.data_paths['train']\n # now form the data-loader with the valset path\n\n train_dataloader = torch.utils.data.DataLoader(MuJoCoOfflineData(\n config = self.config,\n dataset_path=train_data_path,\n plot=False, train=False,\n fix_view=False, ndata=ndata\n ), batch_size=self.config.B, shuffle=False, drop_last=True)\n\n\n print(f'Length of train_data is {len(train_dataloader)}')\n\n tensor_in_cluster = dict()\n\n # do the forward pass now\n for epoch_id in range(epochs):\n set_loader = iter(train_dataloader) \n for i, feed in enumerate(set_loader):\n # move everything to cuda\n feed_cuda = feed\n for k in feed:\n try:\n feed_cuda[k] = feed[k].cuda()\n except:\n # some of them are not tensor\n feed_cuda[k] = feed[k]\n \n #feed_cuda['writer'] = summ_writer \n #feed_cuda['global_step'] = step\n feed_cuda['set_num'] = 1\n #feed_cuda['set_name'] = 'val'\n #feed_cuda['record'] = feed['record']\n \n # now I am ready for the forward pass, I want it to return to me\n # the 3d tensor which belongs to the object\n with torch.no_grad():\n results = self.model.convert_objects_to_features(feed_cuda)\n \n \n if not using_success_rate:\n for object_id in range(len(feed['cluster_id'])):\n object_in_batch = feed[\"cluster_id\"][object_id]\n \n for batch_id in range(len(object_in_batch)):\n object_cluster_id = object_in_batch[batch_id]\n if object_cluster_id not in tensor_in_cluster:\n tensor_in_cluster[object_cluster_id] = []\n tensor_in_cluster[object_cluster_id].append(results[batch_id, object_id])\n else:\n\n object_id = 0\n for batch_id in range(len(feed['success_rates'])):\n success_rate = feed[\"success_rates\"][batch_id]\n\n nclusters = len(success_rate)\n for cluster_id in range(nclusters):\n if success_rate[cluster_id] >= 0.8:\n cluster_name = f\"c{cluster_id}\"\n if cluster_id not in tensor_in_cluster:\n tensor_in_cluster[cluster_name] = []\n tensor_in_cluster[cluster_name].append(results[batch_id, object_id])\n #max_sr = np.maximum(success_rate)\n\n\n\n\n for cluster_id in tensor_in_cluster:\n tensor_in_cluster[cluster_id] = np.mean(np.stack(tensor_in_cluster[cluster_id], axis=0), axis=0)\n import pickle\n\n with open(os.path.join(self.config.vis_dir, \"clusters.pkl\"), 'wb') as f:\n pickle.dump(tensor_in_cluster, f)\n return tensor_in_cluster\n\n\n @staticmethod\n def get_obj_name_and_class(full_path):\n splits = full_path[0].split('/')\n cls_name = splits[-2]\n extra_info = splits[-1]\n split_1 = extra_info.split('_')\n ob_name = split_1[-1][:-4]\n return cls_name, ob_name\n\n def go(self):\n self.start_time = time.time()\n self.infer() # defines the model\n # build the saveloader\n self.saverloader = saverloader.SaverLoader(self.config, self.model)\n self.saverloader.save_config()\n\n print(\"------ Start loading weights -----\")\n self.start_iter = self.saverloader.load_weights(optimizer=None) ## load the weights for each part\n print(f'---- self.start_iter = {self.start_iter}')\n print(\"------ Done loading weights ------\")\n\n\n if self.config.do_compute_cluster_center: # only during testing\n self.compute_cluster_center_from_train()\n return\n\n\n # ... Declare the optimzer ... 
#\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n print('----- Done making optimizer ---------')\n\n # for nearest_neightbor retrieval\n self.train_data_nn = None\n self.val_data_nn = None\n self.trainval_data_nn = None\n\n\n set_nums = []\n set_names = []\n set_inputs = []\n set_writers = []\n set_log_freqs = []\n set_do_backprops = []\n set_dicts = []\n set_loaders = []\n\n for set_name in self.config.set_names:\n # sets to run are determined by if the field corresponding to\n # trainset, valset, testset is None or not\n if self.config.sets_to_run[set_name]:\n set_nums.append(self.config.set_nums[set_name])\n set_names.append(set_name)\n set_inputs.append(self.all_inputs[set_name]) # dict formed in the input function\n set_writers.append(SummaryWriter(self.log_dir + '/' + set_name, max_queue = MAX_QUEUE))\n set_log_freqs.append(self.config.log_freqs[set_name])\n set_do_backprops.append(self.config.sets_to_backprop[set_name])\n set_dicts.append({})\n set_loaders.append(iter(set_inputs[-1])) #use the latest set_inputs\n\n for step in range(self.start_iter+1, self.config.max_iters+1):\n for i, (set_input) in enumerate(set_inputs):\n if step % len(set_input) == 0: #restart after one epoch. Note this does nothing for the tfrecord loader\n # this means the epoch is done, for val I want to break here\n # if the epoch is done and set is validation or test then break it\n # break\n # train: 0, val: 1, test:2\n set_num = set_nums[i]\n if set_num == 1 and self.config.do_generate_data:\n # we only want to go through once for the valset and if data-generation mode is true\n break\n else:\n # while collecting data as well, when the test set is exhausted it will just refresh\n # the iterator, the design could have so much better if just epochs would have been used.\n set_loaders[i] = iter(set_input) # refresh the iterators\n \n for (set_num,\n set_name,\n set_input,\n set_writer,\n set_log_freq,\n set_do_backprop,\n set_dict,\n set_loader\n ) in zip(\n set_nums,\n set_names,\n set_inputs,\n set_writers,\n set_log_freqs,\n set_do_backprops,\n set_dicts,\n set_loaders\n ):\n\n # this loop will run atmost 3 times, once with train, then val and then test\n # log_this for val and test is 50 and set_do_backprop is 0 hence it does not\n # evaluates to true only if the condition below is true\n log_this = np.mod(step, set_log_freq) == 0 ## so this will be true everytime since using fastest logging\n total_time, read_time, iter_time = 0.0, 0.0, 0.0\n\n if log_this or set_do_backprop: # training or logging\n #print('%s: set_num %d; log_this %d; set_do_backprop %d; ' % (set_name, set_num, log_this, set_do_backprop))\n\n read_start_time = time.time()\n feed = next(set_loader)\n feed_cuda = feed\n\n for k in feed:\n try:\n feed_cuda[k] = feed[k].cuda()\n except:\n feed_cuda[k] = feed[k]\n\n read_time = time.time() - read_start_time\n feed_cuda['writer'] = set_writer\n feed_cuda['global_step'] = step\n feed_cuda['set_num'] = set_num\n feed_cuda['set_name'] = set_name\n feed_cuda['record'] = feed['record']\n #print(f'working on {feed[\"record\"]}')\n\n iter_start_time = time.time()\n if set_do_backprop:\n self.model.train()\n loss, results = self.model(feed_cuda)\n else:\n self.model.eval()\n assert self.model.training == False, \"in eval code\"\n print('----- using the eval branch of the code ------')\n with torch.no_grad():\n loss, results = self.model(feed_cuda)\n\n loss_vis = loss.cpu().item()\n\n if set_do_backprop:\n # if not hyp.do_metric_learning:\n featnet_before = 
utils.basic.get_params(self.model.featnet)\n if self.config.do_occ:\n occnet_before = utils.basic.get_params(self.model.occnet)\n if self.config.do_view:\n viewnet_before = utils.basic.get_params(self.model.viewnet)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n\n\n\n featnet_after = utils.basic.get_params(self.model.featnet)\n if self.config.do_occ:\n occnet_after = utils.basic.get_params(self.model.occnet)\n if self.config.do_view:\n viewnet_after = utils.basic.get_params(self.model.viewnet)\n\n # check if they are changed\n # assert utils.basic.check_notequal(featnet_before, featnet_after)\n # if self.config.do_occ:\n # import ipdb; ipdb.set_trace()\n # assert utils.basic.check_notequal(occnet_before, occnet_after)\n # if self.config.do_view:\n # assert utils.basic.check_notequal(viewnet_before, viewnet_after)\n\n if log_this: # this is a new hyper-parameter which does nearest neighbour evaluation\n # I will write the validation function here\n if self.config.do_validation and set_name==\"test\" and step % self.config.validate_after == 0:\n #self.validate_on_test(feed['global_step'], feed_cuda['writer'])\n #self.validate_nn_on_test_from_train(feed['global_step'], val_summ_writer=feed_cuda['writer'], train_summ_writer=set_writers[0])\n self.validate_on_test_using_computed_center(feed['global_step'], summ_writer=feed_cuda['writer'])\n self.validate_on_train_using_computed_center(feed['global_step'], summ_writer=feed_cuda['writer'])\n\n iter_time = time.time()-iter_start_time\n total_time = time.time()-self.start_time\n\n print(\"%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)\" % (self.config.run_full_name,\n step,\n self.config.max_iters,\n total_time,\n read_time,\n iter_time,\n loss_vis,\n set_name))\n\n if np.mod(step, self.config.snap_freq) == 0:\n self.saverloader.save(step, self.optimizer)\n\n\n for writer in set_writers: #close writers to flush cache into file\n writer.close()\n\n"
]
| [
[
"torch.stack",
"numpy.set_printoptions",
"numpy.load",
"numpy.mean",
"torch.cuda.is_available",
"torch.reshape",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"torch.zeros_like",
"numpy.mod",
"matplotlib.use",
"torch.device",
"numpy.array",
"numpy.reshape",
"torch.min",
"matplotlib.pyplot.close",
"numpy.stack",
"numpy.argsort",
"torch.matmul",
"numpy.asarray",
"numpy.random.seed",
"torch.no_grad",
"torch.from_numpy"
]
]
|
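Both compute_nearest_neighbours_dp and validate_on_test_using_computed_center in the pytorch_disco/model.py record above reduce to the same pattern: flatten each object tensor to a vector, score it against a bank of reference embeddings with dot products, and rank by score. A minimal sketch of that retrieval step with random stand-in tensors (the record's version averages per-voxel dot products, which yields the same ranking up to a constant factor):

import torch

torch.manual_seed(0)
num_query, num_ref, C, D, H, W = 4, 10, 8, 2, 2, 2

query = torch.randn(num_query, C, D, H, W)   # stand-in for the query object tensors
refs = torch.randn(num_ref, C, D, H, W)      # stand-in for the reference bank

q_flat = query.reshape(num_query, -1)        # (num_query, C*D*H*W)
r_flat = refs.reshape(num_ref, -1)           # (num_ref,   C*D*H*W)
scores = torch.matmul(q_flat, r_flat.T)      # (num_query, num_ref) dot-product scores

top3 = torch.argsort(scores, dim=1, descending=True)[:, :3]   # best 3 references per query
print(top3.shape)                            # torch.Size([4, 3])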
CSMdevs/Picton | [
"943b9c2fdcee971491b10ddd075b8e32e983fb48"
]
| [
"train.py"
]
| [
"import tensorflow as tf \nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nimport pickle\nfrom keras.models import model_from_json\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\n\n# Opening the files about data\nX = pickle.load(open(\"datasets/X.pickle\", \"rb\"))\ny = pickle.load(open(\"datasets/y.pickle\", \"rb\"))\n\n# normalizing data (a pixel goes from 0 to 255)\nX = X/255.0\n\n# Building the model\nmodel = Sequential()\n# 3 convolutional layers\nmodel.add(Conv2D(32, (3, 3), input_shape = X.shape[1:]))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\n# 2 hidden layers\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Dense(64))\nmodel.add(Activation(\"relu\"))\n\n# The output layer with 13 neurons, for 13 classes\nmodel.add(Dense(13))\nmodel.add(Activation(\"softmax\"))\n\n# Compiling the model using some basic parameters\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n\t\t\t\toptimizer=\"adam\",\n\t\t\t\tmetrics=[\"accuracy\"])\n\n# Training the model, with 40 iterations\n# validation_split corresponds to the percentage of images used for the validation phase compared to all the images\nhistory = model.fit(X, y, batch_size=32, epochs=10, validation_split=0.1)\n\n# Saving the model\nmodel_json = model.to_json()\nwith open(\"datasets/model.json\", \"w\") as json_file :\n\tjson_file.write(model_json)\n\nmodel.save_weights(\"datasets/model.h5\")\nprint(\"Saved model to disk\")\n\nmodel.save('datasets/CNN.model')\n\n# Printing a graph showing the accuracy changes during the training phase\nprint(history.history.keys())\nplt.figure(1)\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')"
]
| [
[
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.layers.Activation",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.models.Sequential"
]
]
|
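One fragile spot in the Picton train.py record above: with metrics=["accuracy"], recent Keras versions store history under the keys 'accuracy' and 'val_accuracy', while the plotting lines read 'acc' and 'val_acc', which only exist in older releases, so the final plot can raise a KeyError depending on the installed TensorFlow. A small version-agnostic helper (assuming the history object returned by model.fit in that script):

import matplotlib.pyplot as plt

def plot_accuracy(history):
    # Keras >= 2.3 logs 'accuracy'/'val_accuracy'; older versions log 'acc'/'val_acc'
    acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
    plt.plot(history.history[acc_key], label='train')
    plt.plot(history.history['val_' + acc_key], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend(loc='upper left')

# plot_accuracy(history)   # `history` is the object returned by model.fit above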
rsiverd/ultracool | [
"cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1"
]
| [
"02_clean_all_spitzer.py"
]
| [
"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :\n#\n# Clean CBCD Spitzer images by removing cosmic rays and/or removing\n# large-scale background.\n#\n# NOTE: This script assumes a directory structure as created by the\n# related fetch_sha_data.py script. Specifically, the images to be\n# processed are expected to reside in a structure like:\n# object_dir/r<AOR_number>/SPITZER*_cbcd.fits\n#\n# The object_dir contains data for a specific target/sky position to\n# be reduced. <AOR_number> represents a Spitzer AOR (a visit to a sky\n# position at which data were obtained).\n#\n# Rob Siverd\n# Created: 2019-10-30\n# Last modified: 2021-04-21\n#--------------------------------------------------------------------------\n#**************************************************************************\n#--------------------------------------------------------------------------\n\n## Logging setup:\nimport logging\n#logging.basicConfig(level=logging.DEBUG)\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\nlogger.setLevel(logging.INFO)\n\n## Current version:\n__version__ = \"0.5.1\"\n\n## Python version-agnostic module reloading:\ntry:\n reload # Python 2.7\nexcept NameError:\n try:\n from importlib import reload # Python 3.4+\n except ImportError:\n from imp import reload # Python 3.0 - 3.3\n\n## Modules:\nimport argparse\nimport random\nimport signal\nimport os\nimport sys\nimport time\nimport numpy as np\n#from numpy.lib.recfunctions import append_fields\n#from functools import partial\n#from collections import OrderedDict\n#from collections.abc import Iterable\n#import multiprocessing as mp\n#np.set_printoptions(suppress=True, linewidth=160)\n#import itertools as itt\n_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))\n\n## LACOSMIC cosmic ray removal:\ntry:\n from lacosmic import lacosmic\nexcept ImportError:\n logger.error(\"failed to import lacosmic module!\")\n sys.exit(1)\n\n## Spitzer pipeline filesystem helpers:\ntry:\n import spitz_fs_helpers\n reload(spitz_fs_helpers)\nexcept ImportError:\n logger.error(\"failed to import spitz_fs_helpers module!\")\n sys.exit(1)\nsfh = spitz_fs_helpers\n\n## HORIZONS ephemeris tools:\ntry:\n import jpl_eph_helpers\n reload(jpl_eph_helpers)\nexcept ImportError:\n logger.error(\"failed to import jpl_eph_helpers module!\")\n sys.exit(1)\neee = jpl_eph_helpers.EphTool()\n\n## Parallax pipeline coordinate helpers:\ntry:\n import coord_helpers\n reload(coord_helpers)\nexcept ImportError:\n logger.error(\"failed to import coord_helpers module!\")\n sys.exit(1)\ncfr = coord_helpers.CoordFileReader()\nwcc = coord_helpers.WCSCoordChecker()\n\n##--------------------------------------------------------------------------##\n## Disable buffering on stdout/stderr:\nclass Unbuffered(object):\n def __init__(self, stream):\n self.stream = stream\n def write(self, data):\n self.stream.write(data)\n self.stream.flush()\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\nsys.stdout = Unbuffered(sys.stdout)\nsys.stderr = Unbuffered(sys.stderr)\n\n##--------------------------------------------------------------------------##\n\n## Various from astropy:\ntry:\n# import astropy.io.ascii as aia\n import astropy.io.fits as pf\nexcept ImportError:\n logger.error(\"astropy module not found! Install and retry.\")\n sys.exit(1)\n\n## Star extraction:\ntry:\n import easy_sep\n reload(easy_sep)\nexcept ImportError:\n logger.error(\"easy_sep module not found! 
Install and retry.\")\n sys.stderr.write(\"Error: easy_sep module not found!\\n\\n\")\n sys.exit(1)\npse = easy_sep.EasySEP()\n\n##--------------------------------------------------------------------------##\n## Colors for fancy terminal output:\nNRED = '\\033[0;31m' ; BRED = '\\033[1;31m'\nNGREEN = '\\033[0;32m' ; BGREEN = '\\033[1;32m'\nNYELLOW = '\\033[0;33m' ; BYELLOW = '\\033[1;33m'\nNBLUE = '\\033[0;34m' ; BBLUE = '\\033[1;34m'\nNMAG = '\\033[0;35m' ; BMAG = '\\033[1;35m'\nNCYAN = '\\033[0;36m' ; BCYAN = '\\033[1;36m'\nNWHITE = '\\033[0;37m' ; BWHITE = '\\033[1;37m'\nENDC = '\\033[0m'\n\n## Suppress colors in cron jobs:\nif (os.getenv('FUNCDEF') == '--nocolors'):\n NRED = '' ; BRED = ''\n NGREEN = '' ; BGREEN = ''\n NYELLOW = '' ; BYELLOW = ''\n NBLUE = '' ; BBLUE = ''\n NMAG = '' ; BMAG = ''\n NCYAN = '' ; BCYAN = ''\n NWHITE = '' ; BWHITE = ''\n ENDC = ''\n\n## Fancy text:\ndegree_sign = u'\\N{DEGREE SIGN}'\n\n## Dividers:\nhalfdiv = '-' * 40\nfulldiv = '-' * 80\n\n##--------------------------------------------------------------------------##\n## Catch interruption cleanly:\ndef signal_handler(signum, frame):\n sys.stderr.write(\"\\nInterrupted!\\n\\n\")\n sys.exit(1)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n##--------------------------------------------------------------------------##\n## Save FITS image with clobber (astropy / pyfits):\ndef qsave(iname, idata, header=None, padkeys=1000, **kwargs):\n this_func = sys._getframe().f_code.co_name\n parent_func = sys._getframe(1).f_code.co_name\n sys.stderr.write(\"Writing to '%s' ... \" % iname)\n if header:\n while (len(header) < padkeys):\n header.append() # pad header\n if os.path.isfile(iname):\n os.remove(iname)\n pf.writeto(iname, idata, header=header, **kwargs)\n sys.stderr.write(\"done.\\n\")\n\n##--------------------------------------------------------------------------##\n##------------------ Parse Command Line ----------------##\n##--------------------------------------------------------------------------##\n\n## Parse arguments and run script:\nclass MyParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n## Enable raw text AND display of defaults:\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n## Parse the command line:\nif __name__ == '__main__':\n\n # ------------------------------------------------------------------\n prog_name = os.path.basename(__file__)\n descr_txt = \"\"\"\n Clean Spitzer images by removing cosmic rays and/or large-scale\n background light.\n\n Version: %s\n \"\"\" % __version__\n parser = MyParser(prog=prog_name, description=descr_txt,\n formatter_class=argparse.RawTextHelpFormatter)\n # ------------------------------------------------------------------\n parser.set_defaults(ignore_short=True)\n parser.set_defaults(gather_headers=False)\n parser.set_defaults(delete_ignored=True)\n parser.set_defaults(skip_existing=True)\n #parser.set_defaults(diag_frac=0.25)\n parser.set_defaults(diag_frac=0.25)\n # ------------------------------------------------------------------\n #parser.add_argument('firstpos', help='first positional argument')\n #parser.add_argument('-w', '--whatever', required=False, default=5.0,\n # help='some option with default [def: %(default)s]', type=float)\n #parser.add_argument('remainder', help='other stuff', nargs='*')\n # ------------------------------------------------------------------\n # 
------------------------------------------------------------------\n iogroup = parser.add_argument_group('File I/O')\n iogroup.add_argument('--overwrite', required=False, dest='skip_existing',\n action='store_false', help='overwrite existing image files')\n iogroup.add_argument('-E', '--ephem_data', default=None, required=True,\n help='CSV file with SST ephemeris data', type=str)\n iogroup.add_argument('-I', '--image_folder', default=None, required=True,\n help='where to find CBCD images', type=str)\n iogroup.add_argument('-t', '--target_list', required=False, default=None,\n help='provide a list of targets of interest', type=str)\n iogroup.add_argument('-W', '--walk', default=False, action='store_true',\n help='recursively walk subfolders to find CBCD images')\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n jobgroup = parser.add_argument_group('Processing Options')\n jobgroup.add_argument('--ignore_off_target', default=False,\n help='skip images that do not cover a target position',\n action='store_true', required=False)\n jobgroup.add_argument('-r', '--random', default=False,\n help='randomize image processing order\\n(for parallel processing)',\n action='store_true', required=False)\n #iogroup.add_argument('-R', '--ref_image', default=None, required=True,\n # help='KELT image with WCS')\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # Miscellany:\n miscgroup = parser.add_argument_group('Miscellany')\n miscgroup.add_argument('--debug', dest='debug', default=False,\n help='Enable extra debugging messages', action='store_true')\n miscgroup.add_argument('-q', '--quiet', action='count', default=0,\n help='less progress/status reporting')\n miscgroup.add_argument('-v', '--verbose', action='count', default=0,\n help='more progress/status reporting')\n # ------------------------------------------------------------------\n\n context = parser.parse_args()\n context.vlevel = 99 if context.debug else (context.verbose-context.quiet)\n context.prog_name = prog_name\n\n # header examination only needed for certain options:\n if context.ignore_off_target or context.ignore_short:\n context.gather_headers = True\n\n##--------------------------------------------------------------------------##\n##--------------------------------------------------------------------------##\n##--------------------------------------------------------------------------##\n\n## Ensure input folder exists:\nif not os.path.isdir(context.image_folder):\n sys.stderr.write(\"\\nError! Folder not found:\\n\")\n sys.stderr.write(\"--> %s\\n\\n\" % context.image_folder)\n sys.exit(1)\n\n## Get list of CBCD files:\niflav = 'cbcd'\nif context.walk:\n all_cbcd_files = sfh.get_files_walk(context.image_folder, flavor=iflav)\nelse:\n all_cbcd_files = sfh.get_files_single(context.image_folder, flavor=iflav)\nn_cbcd_initial = len(all_cbcd_files)\nuse_cbcd_files = [x for x in all_cbcd_files]\nsys.stderr.write(\"Identified %d '%s' FITS images.\\n\"\n % (n_cbcd_initial, iflav))\n\n## Stop here if nothing to process:\nif not use_cbcd_files:\n logger.warning(\"No images to process, exiting!\\n\")\n sys.exit(1)\n\n## Retrieve FITS headers if needed:\ncbcd_headers = {}\nif context.gather_headers:\n sys.stderr.write(\"Loading FITS headers for all files ... 
\")\n cbcd_headers = {x:pf.getheader(x) for x in all_cbcd_files}\n sys.stderr.write(\"done.\\n\")\n\n##--------------------------------------------------------------------------##\n##------------------ Target Coordinates and Checks ----------------##\n##--------------------------------------------------------------------------##\n\n## Load coordinates if provided:\ntargets = []\nif context.target_list:\n if not os.path.isfile(context.target_list):\n sys.stderr.write(\"\\nError: target list file not found:\\n\")\n sys.stderr.write(\"--> %s\\n\\n\" % context.target_list)\n sys.exit(1)\n targets += cfr.load_coords(context.target_list)\n\n## Remove off-target frames (if requested):\nif context.ignore_off_target:\n\n # halt if targets not provided:\n if not targets:\n logger.error(\"Required targets not provided.\\n\")\n sys.exit(1)\n\n sys.stderr.write(\"%s\\n\" % fulldiv)\n tik = time.time()\n keep_cbcd = []\n drop_cbcd = []\n sys.stderr.write(\"Checking for off-target frames.\\n\")\n ntotal = len(use_cbcd_files)\n\n for ii,ipath in enumerate(use_cbcd_files, 1):\n sys.stderr.write(\"\\rChecking image %d of %d ... \" % (ii, ntotal))\n thdr = cbcd_headers[ipath]\n wcc.set_header(thdr)\n #if wcc.image_covers_position_any(targets):\n if wcc.fdiag_covers_position_any(targets, dfrac=context.diag_frac):\n keep_cbcd.append(ipath)\n else:\n drop_cbcd.append(ipath)\n pass\n sys.stderr.write(\"done.\\n\")\n sys.stderr.write(\"Found %d on-target and %d off-target image(s).\\n\"\n % (len(keep_cbcd), len(drop_cbcd)))\n use_cbcd_files = [x for x in keep_cbcd]\n tok = time.time()\n sys.stderr.write(\"Off-target check took %.3f seconds.\\n\" % (tok-tik))\n\n # halt with specific warning if all images dropped:\n if not use_cbcd_files:\n logger.warning(\"No images survived off-target check.\")\n logger.warning(\"Wrong images or incomplete target list?\")\n sys.exit(1)\n\n##--------------------------------------------------------------------------##\n##------------------ Load SST Ephemeris Data ----------------##\n##--------------------------------------------------------------------------##\n\n## Ephemeris data file must exist:\nif not context.ephem_data:\n logger.error(\"context.ephem_data not set?!?!\")\n sys.exit(1)\nif not os.path.isfile(context.ephem_data):\n logger.error(\"Ephemeris file not found: %s\" % context.ephem_data)\n sys.exit(1)\n\n## Load ephemeris data:\neee.load(context.ephem_data)\n\n##--------------------------------------------------------------------------##\n##--------------------------------------------------------------------------##\n##--------------------------------------------------------------------------##\n\n\n## Inspect headers and ignore short/medium frames:\nif context.ignore_short:\n sys.stderr.write(\"%s\\n\" % fulldiv)\n tik = time.time()\n keep_cbcd = []\n drop_cbcd = []\n sys.stderr.write(\"Checking for short frames ... \")\n trouble = 'PTGCPD'\n for ipath in use_cbcd_files:\n thdr = cbcd_headers[ipath]\n if (trouble in thdr.keys()):\n drop_cbcd.append(ipath)\n else:\n keep_cbcd.append(ipath)\n pass\n sys.stderr.write(\"done. 
Found %d short and %d long image(s).\\n\"\n % (len(drop_cbcd), len(keep_cbcd)))\n sys.stderr.write(\"Dropped short frames!\\n\")\n use_cbcd_files = [x for x in keep_cbcd]\n tok = time.time()\n sys.stderr.write(\"Short-exposure check took %.3f seconds.\\n\" % (tok-tik))\n #with open('non_long.txt', 'w') as f:\n # f.write('\\n'.join(drop_cbcd))\n\n## Randomize image order on request (for parallel processing):\nif context.random:\n random.shuffle(use_cbcd_files) # for parallel operation\n\n## Abort with warning if no files identified:\nif not use_cbcd_files:\n sys.stderr.write(\"\\nError: no usable cbcd files found in location:\\n\")\n sys.stderr.write(\"--> %s\\n\\n\" % context.image_folder)\n sys.exit(1)\n\n\n##--------------------------------------------------------------------------##\n##------------------ Image Path Variants ----------------##\n##--------------------------------------------------------------------------##\n\ndef io_paths_from_cbcd(cbcd_ipath):\n # input paths:\n ipaths = {}\n ipaths[ 'cbcd'] = cbcd_ipath\n ipaths['cbunc'] = cbcd_ipath.replace('cbcd', 'cbunc')\n # output paths:\n opaths = {}\n opaths[ 'vmed'] = cbcd_ipath.replace('cbcd', 'vmed')\n opaths['hcfix'] = cbcd_ipath.replace('cbcd', 'hcfix')\n opaths['clean'] = cbcd_ipath.replace('cbcd', 'clean')\n opaths['crmsk'] = cbcd_ipath.replace('cbcd', 'crmsk')\n return ipaths, opaths\n\n##--------------------------------------------------------------------------##\n##------------------ Ignored Image Removal ----------------##\n##--------------------------------------------------------------------------##\n\n## Reconstruct list of ignored images:\nignored_cbcd_files = sorted(list(set(all_cbcd_files) - set(use_cbcd_files)))\nif context.delete_ignored:\n sys.stderr.write(\"%s\\n\" % fulldiv)\n sys.stderr.write(\"Removing and extant outputs from 'ignore' list ...\\n\")\n total = len(ignored_cbcd_files)\n for ii,cbcd_path in enumerate(ignored_cbcd_files, 1):\n ipaths, opaths = io_paths_from_cbcd(cbcd_path)\n sys.stderr.write(\"\\rChecking %s (%d of %d) ... \" \n % (opaths['clean'], ii, total))\n removed_last = False\n existing = [os.path.isfile(x) for x in opaths.values()]\n if any(existing):\n sys.stderr.write(\"OUTPUT FILES FOUND!\\n\")\n removed_last = True\n removals = [pp for ee,pp in zip(existing, opaths.values())]\n for thing in removals:\n sys.stderr.write(\"--> removing %s\\n\" % thing)\n os.remove(thing)\n sys.stderr.write(\"\\n\")\n pass\n pass\n if not removed_last:\n sys.stderr.write(\"done.\\n\")\n sys.stderr.write(\"Existing 'ignore' files have been removed.\\n\")\n\n\n##--------------------------------------------------------------------------##\n##--------------------------------------------------------------------------##\n\n## LA Cosmic config:\ndef fresh_cr_args():\n return {'contrast': 12.0,\n 'cr_threshold':6.0,\n 'neighbor_threshold':4.0,}\n\n## Clean up each image:\nsys.stderr.write(\"%s\\n\" % fulldiv)\nsys.stderr.write(\"Processing listed images.\\n\")\nntodo = 0\nnproc = 0\ntotal = len(use_cbcd_files)\nfor ii,cbcd_path in enumerate(use_cbcd_files, 1):\n #sys.stderr.write(\"%s\\n\" % fulldiv)\n ipaths, opaths = io_paths_from_cbcd(cbcd_path)\n sys.stderr.write(\"\\rFile %s (%d of %d) ... \" \n % (opaths['clean'], ii, total))\n done_list = list(opaths.values())\n if context.skip_existing:\n if all([os.path.isfile(x) for x in done_list]):\n sys.stderr.write(\"already done! \")\n continue\n sys.stderr.write(\"not found, processing ... \\n\")\n else:\n sys.stderr.write(\"processing ... 
\\n\")\n nproc += 1\n\n # load data:\n idata, ihdrs = pf.getdata(ipaths[ 'cbcd'], header=True)\n udata, uhdrs = pf.getdata(ipaths['cbunc'], header=True)\n #fdata, fhdrs = fitsio.read(img_ipath, header=True)\n\n # augment header with ephemeris data:\n addme = eee.make_header_keys(ipaths['cbcd'], as_basename=True)\n if not addme:\n sys.stderr.write(\"Failed to load ephemeris data!\\n\")\n sys.exit(1)\n ihdrs.extend(addme)\n\n # get median image value:\n ignore = np.isnan(idata) | np.isinf(idata)\n medval = np.median(idata[~ignore])\n\n # vertical median to fix hot columns:\n itemp = idata.copy()\n itemp[ignore] = medval\n vstack = np.median(itemp, axis=0)\n #qsave(vst_ipath, vstack)\n qsave(opaths['vmed'], vstack)\n idata -= vstack[np.newaxis, :]\n #qsave(hcf_ipath, idata, header=ihdrs)\n qsave(opaths['hcfix'], idata, header=ihdrs)\n\n # CR removal:\n lakw = fresh_cr_args()\n lakw['mask'] = np.isnan(idata)\n lakw['error'] = udata\n sys.stderr.write(\"Running LACOSMIC ... \")\n tik = time.time()\n cleaned, cr_mask = lacosmic(idata, **lakw)\n tok = time.time()\n sys.stderr.write(\"done. (%.3f s)\\n\" % (tok-tik))\n \n # save results:\n #qsave(cln_ipath, cleaned, header=ihdrs)\n #qsave(msk_ipath, cr_mask.astype('uint8'), header=ihdrs)\n qsave(opaths['clean'], cleaned, header=ihdrs)\n qsave(opaths['crmsk'], cr_mask.astype('uint8'), header=ihdrs)\n #fitsio.write(msk_ipath, cr_mask.astype('uint8'), header=fhdrs, \n # clobber=True, compress='RICE')\n sys.stderr.write(\"%s\\n\" % fulldiv)\n\n if (ntodo > 0) and (nproc >= ntodo):\n break\n \nsys.stderr.write(\"\\n\")\n\n\n\n\n\n\n######################################################################\n# CHANGELOG (02_clean_all_spitzer.py):\n#---------------------------------------------------------------------\n#\n# 2019-10-30:\n# -- Increased __version__ to 0.1.0.\n# -- First created 02_clean_all_spitzer.py.\n#\n"
]
| [
[
"numpy.median",
"numpy.isnan",
"numpy.isinf",
"numpy.__version__.split"
]
]
|
YosephKS/qiskit-terra | [
"3a8e289a26073b16e9bf434b5b2e13cab78b0fa7"
]
| [
"test/python/qobj/test_pulse_converter.py"
]
| [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Converter Test.\"\"\"\n\nimport hashlib\nimport numpy as np\n\nfrom qiskit.pulse import LoConfig, Kernel, Discriminator\nfrom qiskit.pulse.channels import (DriveChannel, ControlChannel, MeasureChannel, AcquireChannel,\n MemorySlot, RegisterSlot)\nfrom qiskit.pulse.instructions import (SetPhase, ShiftPhase, SetFrequency, ShiftFrequency, Play,\n Delay, Acquire, Snapshot)\nfrom qiskit.pulse.library import Waveform, Gaussian, GaussianSquare, Constant, Drag\nfrom qiskit.pulse.schedule import ParameterizedSchedule, Schedule\nfrom qiskit.qobj import (PulseQobjInstruction, PulseQobjExperimentConfig, PulseLibraryItem,\n QobjMeasurementOption)\nfrom qiskit.qobj.converters import (InstructionToQobjConverter, QobjToInstructionConverter,\n LoConfigConverter)\nfrom qiskit.test import QiskitTestCase\n\n\nclass TestInstructionToQobjConverter(QiskitTestCase):\n \"\"\"Pulse converter tests.\"\"\"\n\n def test_drive_instruction(self):\n \"\"\"Test converted qobj from Play.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Play(Waveform(np.arange(0, 0.01), name='linear'), DriveChannel(0))\n valid_qobj = PulseQobjInstruction(\n name='linear',\n ch='d0',\n t0=0)\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_gaussian_pulse_instruction(self):\n \"\"\"Test that parametric pulses are correctly converted to PulseQobjInstructions.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Play(Gaussian(duration=25, sigma=15, amp=-0.5 + 0.2j), DriveChannel(0))\n valid_qobj = PulseQobjInstruction(\n name='parametric_pulse',\n pulse_shape='gaussian',\n ch='d0',\n t0=0,\n parameters={'duration': 25, 'sigma': 15, 'amp': -0.5 + 0.2j})\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_gaussian_square_pulse_instruction(self):\n \"\"\"Test that parametric pulses are correctly converted to PulseQobjInstructions.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Play(GaussianSquare(duration=1500, sigma=15, amp=-0.5 + 0.2j, width=1300),\n MeasureChannel(1))\n\n valid_qobj = PulseQobjInstruction(\n name='parametric_pulse',\n pulse_shape='gaussian_square',\n ch='m1',\n t0=10,\n parameters={'duration': 1500, 'sigma': 15, 'amp': -0.5 + 0.2j, 'width': 1300})\n self.assertEqual(converter(10, instruction), valid_qobj)\n\n def test_constant_pulse_instruction(self):\n \"\"\"Test that parametric pulses are correctly converted to PulseQobjInstructions.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Play(Constant(duration=25, amp=1), ControlChannel(2))\n\n valid_qobj = PulseQobjInstruction(\n name='parametric_pulse',\n pulse_shape='constant',\n ch='u2',\n t0=20,\n parameters={'duration': 25, 'amp': 1})\n self.assertEqual(converter(20, instruction), valid_qobj)\n\n def test_drag_pulse_instruction(self):\n \"\"\"Test that parametric pulses are correctly converted to PulseQobjInstructions.\"\"\"\n converter = 
InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Play(Drag(duration=25, sigma=15, amp=-0.5 + 0.2j, beta=0.5), DriveChannel(0))\n\n valid_qobj = PulseQobjInstruction(\n name='parametric_pulse',\n pulse_shape='drag',\n ch='d0',\n t0=30,\n parameters={'duration': 25, 'sigma': 15, 'amp': -0.5 + 0.2j, 'beta': 0.5})\n self.assertEqual(converter(30, instruction), valid_qobj)\n\n def test_frame_change(self):\n \"\"\"Test converted qobj from ShiftPhase.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n valid_qobj = PulseQobjInstruction(\n name='fc',\n ch='d0',\n t0=0,\n phase=0.1\n )\n instruction = ShiftPhase(0.1, DriveChannel(0))\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_set_phase(self):\n \"\"\"Test converted qobj from ShiftPhase.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = SetPhase(3.14, DriveChannel(0))\n\n valid_qobj = PulseQobjInstruction(\n name='setp',\n ch='d0',\n t0=0,\n phase=3.14\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_set_frequency(self):\n \"\"\"Test converted qobj from SetFrequency.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = SetFrequency(8.0e9, DriveChannel(0))\n\n valid_qobj = PulseQobjInstruction(\n name='setf',\n ch='d0',\n t0=0,\n frequency=8.0\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_shift_frequency(self):\n \"\"\"Test converted qobj from ShiftFrequency.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = ShiftFrequency(8.0e9, DriveChannel(0))\n\n valid_qobj = PulseQobjInstruction(\n name='shiftf',\n ch='d0',\n t0=0,\n frequency=8.0\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_acquire(self):\n \"\"\"Test converted qobj from AcquireInstruction.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Acquire(10, AcquireChannel(0), MemorySlot(0), RegisterSlot(0))\n valid_qobj = PulseQobjInstruction(\n name='acquire',\n t0=0,\n duration=10,\n qubits=[0],\n memory_slot=[0],\n register_slot=[0])\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n # without register\n instruction = Acquire(10, AcquireChannel(0), MemorySlot(0))\n valid_qobj = PulseQobjInstruction(\n name='acquire',\n t0=0,\n duration=10,\n qubits=[0],\n memory_slot=[0])\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n def test_snapshot(self):\n \"\"\"Test converted qobj from Snapshot.\"\"\"\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Snapshot(label='label', snapshot_type='type')\n\n valid_qobj = PulseQobjInstruction(\n name='snapshot',\n t0=0,\n label='label',\n type='type'\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)\n\n\nclass TestQobjToInstructionConverter(QiskitTestCase):\n \"\"\"Pulse converter tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.linear = Waveform(np.arange(0, 0.01), name='linear')\n self.pulse_library = [PulseLibraryItem(name=self.linear.name,\n samples=self.linear.samples.tolist())]\n\n self.converter = QobjToInstructionConverter(self.pulse_library, buffer=0)\n self.num_qubits = 2\n\n def test_drive_instruction(self):\n \"\"\"Test converted qobj from PulseInstruction.\"\"\"\n instruction = Play(self.linear, DriveChannel(0))\n qobj = PulseQobjInstruction(name='linear', ch='d0', t0=10)\n converted_instruction 
= self.converter(qobj)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n\n def test_parametric_pulses(self):\n \"\"\"Test converted qobj from ParametricInstruction.\"\"\"\n instruction = Play(Gaussian(duration=25, sigma=15, amp=-0.5 + 0.2j, name='pulse1'),\n DriveChannel(0))\n qobj = PulseQobjInstruction(\n name='parametric_pulse',\n label='pulse1',\n pulse_shape='gaussian',\n ch='d0',\n t0=0,\n parameters={'duration': 25, 'sigma': 15, 'amp': -0.5 + 0.2j})\n converted_instruction = self.converter(qobj)\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 25)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n self.assertEqual(converted_instruction.instructions[0][-1].pulse.name, 'pulse1')\n\n def test_parametric_pulses_no_label(self):\n \"\"\"Test converted qobj from ParametricInstruction without label.\"\"\"\n base_str = \"gaussian_[('amp', (-0.5+0.2j)), ('duration', 25), ('sigma', 15)]\"\n short_pulse_id = hashlib.md5(base_str.encode('utf-8')).hexdigest()[:4]\n pulse_name = 'gaussian_{}'.format(short_pulse_id)\n\n qobj = PulseQobjInstruction(\n name='parametric_pulse',\n pulse_shape='gaussian',\n ch='d0',\n t0=0,\n parameters={'duration': 25, 'sigma': 15, 'amp': -0.5 + 0.2j})\n converted_instruction = self.converter(qobj)\n self.assertEqual(converted_instruction.instructions[0][-1].pulse.name, pulse_name)\n\n def test_frame_change(self):\n \"\"\"Test converted qobj from ShiftPhase.\"\"\"\n qobj = PulseQobjInstruction(name='fc', ch='m0', t0=0, phase=0.1)\n converted_instruction = self.converter(qobj)\n\n instruction = ShiftPhase(0.1, MeasureChannel(0))\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 0)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n\n def test_parameterized_frame_change(self):\n \"\"\"Test converted qobj from ShiftPhase.\"\"\"\n instruction = ShiftPhase(4., MeasureChannel(0))\n shifted = instruction << 10\n\n qobj = PulseQobjInstruction(name='fc', ch='m0', t0=10, phase='P1**2')\n converted_instruction = self.converter(qobj)\n\n self.assertIsInstance(converted_instruction, ParameterizedSchedule)\n\n evaluated_instruction = converted_instruction.bind_parameters(2.)\n\n self.assertEqual(evaluated_instruction.start_time, shifted.start_time)\n self.assertEqual(evaluated_instruction.duration, shifted.duration)\n self.assertEqual(evaluated_instruction.instructions[0][-1], instruction)\n\n def test_set_phase(self):\n \"\"\"Test converted qobj from SetPhase.\"\"\"\n qobj = PulseQobjInstruction(name='setp', ch='m0', t0=0, phase=3.14)\n converted_instruction = self.converter(qobj)\n\n instruction = SetPhase(3.14, MeasureChannel(0))\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 0)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n\n def test_parameterized_set_phase(self):\n \"\"\"Test converted qobj from SetPhase, with parameterized phase.\"\"\"\n qobj = PulseQobjInstruction(name='setp', ch='m0', t0=0, phase='p/2')\n converted_instruction = self.converter(qobj)\n self.assertIsInstance(converted_instruction, ParameterizedSchedule)\n\n evaluated_instruction = converted_instruction.bind_parameters(3.14)\n\n instruction = SetPhase(3.14 / 2, MeasureChannel(0))\n self.assertEqual(evaluated_instruction.start_time, 0)\n self.assertEqual(evaluated_instruction.duration, 0)\n 
self.assertEqual(evaluated_instruction.instructions[0][-1], instruction)\n\n def test_set_frequency(self):\n \"\"\"Test converted qobj from SetFrequency.\"\"\"\n instruction = SetFrequency(8.0e9, DriveChannel(0))\n\n qobj = PulseQobjInstruction(name='setf', ch='d0', t0=0, frequency=8.0)\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 0)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n self.assertTrue('frequency' in qobj.to_dict())\n\n def test_parameterized_set_frequency(self):\n \"\"\"Test converted qobj from SetFrequency, when passing a parameterized frequency.\"\"\"\n qobj = PulseQobjInstruction(name='setf', ch='d0', t0=2, frequency='f')\n self.assertTrue('frequency' in qobj.to_dict())\n\n converted_instruction = self.converter(qobj)\n self.assertIsInstance(converted_instruction, ParameterizedSchedule)\n\n evaluated_instruction = converted_instruction.bind_parameters(2.)\n\n instruction = SetFrequency(2.e9, DriveChannel(0))\n\n self.assertEqual(evaluated_instruction.start_time, 2)\n self.assertEqual(evaluated_instruction.duration, 2)\n self.assertEqual(evaluated_instruction.instructions[0][-1], instruction)\n\n def test_shift_frequency(self):\n \"\"\"Test converted qobj from ShiftFrequency.\"\"\"\n instruction = ShiftFrequency(8.0e9, DriveChannel(0))\n\n qobj = PulseQobjInstruction(name='shiftf', ch='d0', t0=0, frequency=8.0)\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 0)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n self.assertTrue('frequency' in qobj.to_dict())\n\n def test_parameterized_shift_frequency(self):\n \"\"\"Test converted qobj from ShiftFrequency, with a parameterized frequency.\"\"\"\n instruction = ShiftFrequency(8.0e9, DriveChannel(0))\n\n qobj = PulseQobjInstruction(name='shiftf', ch='d0', t0=1, frequency='f / 1000')\n self.assertTrue('frequency' in qobj.to_dict())\n\n converted_instruction = self.converter(qobj)\n self.assertIsInstance(converted_instruction, ParameterizedSchedule)\n\n evaluated_instruction = converted_instruction.bind_parameters(3.14)\n\n instruction = ShiftFrequency(3.14e6, DriveChannel(0))\n\n self.assertEqual(evaluated_instruction.start_time, 1)\n self.assertEqual(evaluated_instruction.duration, 1)\n self.assertEqual(evaluated_instruction.instructions[0][-1], instruction)\n\n def test_delay(self):\n \"\"\"Test converted qobj from Delay.\"\"\"\n instruction = Delay(10, DriveChannel(0))\n\n qobj = PulseQobjInstruction(name='delay', ch='d0', t0=0, duration=10)\n converted_instruction = self.converter(qobj)\n\n self.assertTrue('delay' in qobj.to_dict().values())\n self.assertEqual(converted_instruction.duration, instruction.duration)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n\n def test_acquire(self):\n \"\"\"Test converted qobj from Acquire.\"\"\"\n schedule = Schedule()\n for i in range(self.num_qubits):\n schedule |= Acquire(10, AcquireChannel(i), MemorySlot(i), RegisterSlot(i),\n kernel=Kernel(name='test_kern', test_params='test'),\n discriminator=Discriminator(name='test_disc',\n test_params=1.0))\n\n qobj = PulseQobjInstruction(name='acquire', t0=0, duration=10, qubits=[0, 1],\n memory_slot=[0, 1], register_slot=[0, 1],\n kernels=[QobjMeasurementOption(\n name='test_kern', params={'test_params': 'test'})],\n 
discriminators=[QobjMeasurementOption(\n name='test_disc', params={'test_params': 1.0})])\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.start_time, 0)\n self.assertEqual(converted_instruction.duration, 10)\n self.assertEqual(converted_instruction.instructions[0][-1].duration, 10)\n self.assertEqual(converted_instruction.instructions[0][-1].kernel.params,\n {'test_params': 'test'})\n self.assertEqual(converted_instruction.instructions[1][-1].channel, AcquireChannel(1))\n\n def test_snapshot(self):\n \"\"\"Test converted qobj from SnapShot.\"\"\"\n instruction = Snapshot(label='label', snapshot_type='type')\n shifted = instruction << 10\n\n qobj = PulseQobjInstruction(name='snapshot', t0=10, label='label', type='type')\n converted_instruction = self.converter(qobj)\n\n self.assertEqual(converted_instruction.start_time, shifted.start_time)\n self.assertEqual(converted_instruction.duration, shifted.duration)\n self.assertEqual(converted_instruction.instructions[0][-1], instruction)\n\n\nclass TestLoConverter(QiskitTestCase):\n \"\"\"LO converter tests.\"\"\"\n\n def test_qubit_los(self):\n \"\"\"Test qubit channel configuration.\"\"\"\n user_lo_config = LoConfig({DriveChannel(0): 1.3e9})\n converter = LoConfigConverter(PulseQobjExperimentConfig,\n [1.2e9], [3.4e9], [(0., 5e9)], [(0., 5e9)])\n\n valid_qobj = PulseQobjExperimentConfig(qubit_lo_freq=[1.3])\n\n self.assertEqual(converter(user_lo_config), valid_qobj)\n\n def test_meas_los(self):\n \"\"\"Test measurement channel configuration.\"\"\"\n user_lo_config = LoConfig({MeasureChannel(0): 3.5e9})\n converter = LoConfigConverter(PulseQobjExperimentConfig,\n [1.2e9], [3.4e9], [(0., 5e9)], [(0., 5e9)])\n\n valid_qobj = PulseQobjExperimentConfig(meas_lo_freq=[3.5])\n\n self.assertEqual(converter(user_lo_config), valid_qobj)\n"
]
| [
[
"numpy.arange"
]
]
|
varshakirani/thesis | [
"73e832688bac32d89bd4c2f563db8cf980b22712"
]
| [
"src/visualization.py"
]
| [
"import os\nimport json\nfrom argparse import ArgumentParser\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import array\nfrom statistics import mean\n\n\n\ndef parse_options():\n parser = ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", required=False,\n default=\"outputs\", type=str,\n help=\"Path to output folder\")\n\n options = parser.parse_args()\n return options\n\n\ndef main():\n print(\"Visualization of cross validation score\")\n options = parse_options()\n classes = [\"12\", \"23\", \"31\", \"123\"]\n\n print(\"Visualization of Training scores\")\n fig, axes = plt.subplots(nrows=2, ncols=2)\n axs = axes.ravel()\n i = 0\n for c in classes:\n\n results_train = {}\n\n for json_file in os.listdir(options.output):\n data_type = json_file.split(\".\")[0].split(\"_\")[-2]\n\n if json_file.split(\".\")[0].split(\"_\")[-1] == c and data_type == \"train\":\n model_name = json_file.split(\".\")[0][8:]\n results_train[model_name] = json.load(open(options.output+\"/\"+json_file))\n for model, scores in results_train.items():\n scores = array(scores)\n mean_scores = scores.mean(axis=1)\n axs[i].hist(mean_scores, 100, label=\"%s. Mean:%s\" % (model, mean_scores.mean(axis=0)))\n axs[i].legend(loc = \"upper right\")\n axs[i].set_title(c)\n\n i = i+1\n plt.suptitle(\"Training Scores\")\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n\n plt.savefig(\"out/visualization/train_hist.png\")\n plt.show()\n\n print(\"Visualization of testing scores\")\n fig, axes = plt.subplots(nrows=2, ncols=2)\n axs = axes.ravel()\n i = 0\n\n for c in classes:\n\n results_test = {}\n\n for json_file in os.listdir(options.output):\n data_type = json_file.split(\".\")[0].split(\"_\")[-2]\n\n if json_file.split(\".\")[0].split(\"_\")[-1] == c and data_type == \"test\":\n\n model_name = json_file.split(\".\")[0][8:]\n results_test[model_name] = json.load(open(options.output + \"/\" + json_file))\n\n for model, scores in results_test.items():\n scores = array(scores)\n axs[i].hist(scores, 100, label=\"%s. 
Mean:%s\" % (model, mean(scores)))\n axs[i].legend(loc=\"upper right\")\n axs[i].set_title(c)\n\n i = i + 1\n plt.suptitle(\"Testing Scores\")\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n\n plt.savefig(\"out/visualization/test_hist.png\")\n plt.show()\n print(\"Visualization of both train and test \")\n fig, axes = plt.subplots(nrows=4, ncols=3)\n axs = axes.ravel()\n i = 0\n\n results_test = {}\n results_train = {}\n for json_file in os.listdir(options.output):\n data_type = json_file.split(\".\")[0].split(\"_\")[-2]\n if data_type == \"train\":\n model_name = json_file.split(\".\")[0][8:]\n results_train[model_name] = json.load(open(options.output + \"/\" + json_file))\n\n elif data_type == \"test\":\n model_name = json_file.split(\".\")[0][8:]\n results_test[model_name] = json.load(open(options.output + \"/\" + json_file))\n\n for (model_train, scores_train), (model_test, scores_test) in zip(results_train.items(), results_test.items()):\n class_det = model_train.split(\"_\")[-1]\n model_name = model_train.split(\"_\")[0:-2]\n model_name = \" \".join(model_name)\n model_name = model_name + \" \" + class_det\n axs[i].plot(array(scores_train).mean(axis=1), color='green', alpha=0.8, label='Train %s'%(mean(array(scores_train).mean(axis=1))))\n axs[i].plot(array(scores_test), color='magenta', alpha=0.8, label='Test %s'%(mean(scores_test)))\n axs[i].set_title(\"%s\" % (model_name), fontsize=14)\n # axs[i].xlabel('Epochs')\n axs[i].legend(loc='upper left')\n i = i + 1\n\n plt.suptitle(\"Both Training and testing\")\n\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n\n plt.savefig(\"out/visualization/train_test.png\")\n plt.show()\n\n\n####################\n fig, axes = plt.subplots(nrows=2, ncols=2)\n axs = axes.ravel()\n i = 0\n\n for c in classes:\n\n results_test = {}\n\n for json_file in os.listdir(options.output):\n data_type = json_file.split(\".\")[0].split(\"_\")[-2]\n\n if json_file.split(\".\")[0].split(\"_\")[-1] == c and data_type == \"test\":\n\n model_name = json_file.split(\".\")[0][8:]\n results_test[model_name] = json.load(open(options.output + \"/\" + json_file))\n\n for model, scores in results_test.items():\n scores = np.array(scores)\n x_values = np.arange(len(scores))\n label_str = \"%s. Mean:%s\" % (' '.join(map(str, model.split(\"_\")[:-2])), mean(scores))\n axs[i].plot(x_values, scores, label=label_str)\n axs[i].legend(loc=\"upper right\")\n axs[i].set_title(c)\n axs[i].set_ylim([0, 1])\n\n i = i + 1\n plt.suptitle(\"Testing Scores\")\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n\n plt.savefig(\"out/visualization/test_line.png\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"numpy.array",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
]
|
UBCDingXin/RepDistiller | [
"dcc043277f2820efafd679ffb82b8e8195b7e222"
]
| [
"helper/loops.py"
]
| [
"from __future__ import print_function, division\n\nimport sys\nimport time\nimport torch\n\nfrom .util import AverageMeter, accuracy\n\n\ndef train_vanilla(epoch, train_loader, model, criterion, optimizer, opt):\n \"\"\"vanilla training\"\"\"\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n end = time.time()\n for idx, (input, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n input = input.float()\n if torch.cuda.is_available():\n input = input.cuda()\n target = target.cuda()\n\n # ===================forward=====================\n output = model(input)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # ===================meters=====================\n batch_time.update(time.time() - end)\n end = time.time()\n\n # tensorboard logger\n pass\n\n # print info\n if idx % opt.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, idx, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n sys.stdout.flush()\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, losses.avg\n\n\ndef train_distill(epoch, train_loader, module_list, criterion_list, optimizer, opt):\n \"\"\"One epoch distillation\"\"\"\n # set modules as train()\n for module in module_list:\n module.train()\n # set teacher as eval()\n module_list[-1].eval()\n\n if opt.distill == 'abound':\n module_list[1].eval()\n elif opt.distill == 'factor':\n module_list[2].eval()\n\n criterion_cls = criterion_list[0]\n criterion_div = criterion_list[1]\n criterion_kd = criterion_list[2]\n\n model_s = module_list[0]\n model_t = module_list[-1]\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n end = time.time()\n for idx, data in enumerate(train_loader):\n if opt.distill in ['crd']:\n input, target, index, contrast_idx = data\n else:\n input, target, index = data\n data_time.update(time.time() - end)\n\n input = input.float()\n if torch.cuda.is_available():\n input = input.cuda()\n target = target.cuda()\n index = index.cuda()\n if opt.distill in ['crd']:\n contrast_idx = contrast_idx.cuda()\n\n # ===================forward=====================\n preact = False\n if opt.distill in ['abound']:\n preact = True\n feat_s, logit_s = model_s(input, is_feat=True, preact=preact)\n with torch.no_grad():\n feat_t, logit_t = model_t(input, is_feat=True, preact=preact)\n feat_t = [f.detach() for f in feat_t]\n\n # cls + kl div\n loss_cls = criterion_cls(logit_s, target)\n loss_div = criterion_div(logit_s, logit_t)\n\n # other kd beyond KL divergence\n if opt.distill == 'kd':\n loss_kd = 0\n elif opt.distill == 'hint':\n f_s = module_list[1](feat_s[opt.hint_layer])\n f_t = feat_t[opt.hint_layer]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'crd':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = 
criterion_kd(f_s, f_t, index, contrast_idx)\n elif opt.distill == 'attention':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'nst':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'similarity':\n g_s = [feat_s[-2]]\n g_t = [feat_t[-2]]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'rkd':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'pkt':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'kdsvd':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'correlation':\n f_s = module_list[1](feat_s[-1])\n f_t = module_list[2](feat_t[-1])\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'vid':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = [c(f_s, f_t) for f_s, f_t, c in zip(g_s, g_t, criterion_kd)]\n loss_kd = sum(loss_group)\n elif opt.distill == 'abound':\n # can also add loss to this stage\n loss_kd = 0\n elif opt.distill == 'fsp':\n # can also add loss to this stage\n loss_kd = 0\n elif opt.distill == 'factor':\n factor_s = module_list[1](feat_s[-2])\n factor_t = module_list[2](feat_t[-2], is_factor=True)\n loss_kd = criterion_kd(factor_s, factor_t)\n else:\n raise NotImplementedError(opt.distill)\n\n loss = opt.gamma * loss_cls + opt.alpha * loss_div + opt.beta * loss_kd\n\n acc1, acc5 = accuracy(logit_s, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # ===================meters=====================\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if idx % opt.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, idx, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n sys.stdout.flush()\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, losses.avg\n\n\ndef validate(val_loader, model, criterion, opt):\n \"\"\"validation\"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for idx, (input, target) in enumerate(val_loader):\n\n input = input.float()\n if torch.cuda.is_available():\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if idx % opt.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} 
({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n idx, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n"
]
| [
[
"torch.no_grad",
"torch.cuda.is_available"
]
]
|
xlrshop/Parl | [
"5171ed0a3b555846cb5174c99bc6faace3ac1d99"
]
| [
"parlai/core/pytorch_data_teacher.py"
]
| [
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\"\"\"\n (NOTE: To use this class, please follow the tutorial here:\n http://parl.ai/static/docs/tutorial_worlds.html#multiprocessed-pytorch-dataloader)\n\n\"\"\"\nfrom .teachers import FixedDialogTeacher\nfrom parlai.scripts.build_pytorch_data import build_data\nfrom .agents import get_agent_module\nimport json\nimport math\nimport random\nfrom functools import wraps\nimport importlib\nimport copy\ntry:\n import torch\nexcept Exception as e:\n raise ImportError('Need to install Pytorch: go to pytorch.org')\nfrom torch.utils.data import Dataset, DataLoader, sampler\nfrom torch.multiprocessing import Lock, Value\nimport ctypes\nfrom threading import Thread, Condition, RLock\n\n\n'''\n Maps episode length to dictionary with following keys:\n current_idx: which episode in the list are we at (if simply indexing\n into list)\n ep_list: list of episodes of the length of the key\n bucket_complete: if there are no more episodes left to consider in\n the bucket\n'''\nlength_to_eps = {} # Maps episode length to list\n # of episodes\nbatches = [] # List of batches if popping\n # batches\nload_complete = Value(ctypes.c_bool, False) # If all episodes have been\n # loaded into memory\nbatches_lock = Lock() # Lock to access batches\ncache_lock = Lock() # Lock to access length_to_eps\nfill_cache_lock = RLock() # Lock for condition variables\nadd_to_cache_cv = Condition(lock=fill_cache_lock) # Condition notifying Loader\n # to add to cache\ncache_filled_cv = Condition(lock=fill_cache_lock) # Condition notifying teacher\n # that cache has episodes\n\n\ndef batch_cache(function):\n max_cache_size = 10000 # Max unseen eps\n min_cache_size = 1000 # Min unseen eps\n\n def get_cache_size():\n '''Returns number of available episodes '''\n return sum(len(v['ep_list']) - v['current_idx']for k, v in length_to_eps.items())\n\n def get_available_buckets(bsz):\n '''Returns buckets where there are enough episodes for a batch'''\n if load_complete.value:\n return {k: v for k, v in length_to_eps.items() if not v['bucket_complete'] or len(v['ep_list']) - v['current_idx'] > 0}\n else:\n return {k: v for k, v in length_to_eps.items() if len(v['ep_list']) - v['current_idx'] >= bsz}\n\n def reset():\n '''Resets the indices into the buckets'''\n with cache_lock:\n for idx in length_to_eps:\n length_to_eps[idx]['current_idx'] = 0\n length_to_eps[idx]['bucket_complete'] = False\n\n def consolidate(caller):\n '''Consolidate remaining episodes into batches'''\n load_complete.value = True\n bsz = caller.bsz\n batch = []\n sorted_lengths = sorted(length_to_eps.keys())\n with cache_lock:\n if caller.batch_cache_type == 'index':\n for length in sorted_lengths:\n current_idx = length_to_eps[length]['current_idx']\n ep_list = length_to_eps[length]['ep_list']\n unseen_eps = ep_list[current_idx:]\n length_to_eps[length]['ep_list'] = ep_list[:current_idx]\n batch = unseen_eps + batch\n while len(batch) >= bsz:\n length_to_eps[length]['ep_list'] += batch[:bsz]\n batch = batch[bsz:]\n if len(batch) > 0:\n length_to_eps[-1] = {\n 'current_idx': 0,\n 'ep_list': batch,\n 'bucket_complete': False\n }\n elif caller.batch_cache_type == 'pop':\n for length in sorted_lengths:\n batch += length_to_eps[length]['ep_list']\n with batches_lock:\n while 
len(batch) >= bsz:\n batches.append(batch[:bsz])\n batch = batch[bsz:]\n if len(batch) > 0:\n with batches_lock:\n batches.append(batch)\n\n def flatten(l):\n '''Helper function for flattening a list'''\n return [item for sublist in l for item in sublist]\n\n def put_in_cache(ep_idx, episode, caller):\n '''Put episode `ep_idx` into cache'''\n length = episode['text'].count(' ')\n lengths = [length] + flatten([[length + i, length + (i * -1)] for i in range(1, caller.batch_length_range)])\n lengths = [max(i, 1) for i in lengths]\n in_cache = False\n for l in lengths:\n if l in length_to_eps:\n with cache_lock:\n length_to_eps[l]['ep_list'] += [(ep_idx, episode)]\n in_cache = True\n break\n if not in_cache:\n with cache_lock:\n length_to_eps[length] = {\n 'current_idx': 0,\n 'ep_list': [(ep_idx, episode)],\n 'bucket_complete': False\n }\n if ep_idx == caller.dataset.num_episodes() - 1:\n consolidate(caller)\n with add_to_cache_cv:\n cache_filled_cv.notify_all()\n\n @wraps(function)\n def wrapper(*args):\n caller = args[0]\n batch_cache_type = caller.batch_cache_type\n bsz = caller.bsz\n if batch_cache_type == 'none' or not caller.datatype.startswith('train'):\n return function(*args)\n # If Loader, put episodes in cache\n if isinstance(caller, LoaderProcess):\n with add_to_cache_cv:\n while get_cache_size() >= max_cache_size and len(get_available_buckets(bsz)) > 0:\n cache_filled_cv.notify_all()\n add_to_cache_cv.wait()\n idx_and_batch = function(*args)\n if idx_and_batch is None:\n return None\n for ep_index, ep in idx_and_batch[1]:\n put_in_cache(ep_index, ep, caller)\n return idx_and_batch\n # If teacher, return batch of episodes\n else:\n teacher = caller\n num_batches = teacher.num_batches\n while True:\n with cache_filled_cv:\n while (not load_complete.value and\n (get_cache_size() <= min_cache_size or len(get_available_buckets(bsz)) == 0)):\n add_to_cache_cv.notify()\n cache_filled_cv.wait()\n available_buckets = get_available_buckets(bsz)\n if load_complete.value and batch_cache_type == 'pop':\n return teacher.batch_idx + 1, random.choice(batches)\n batch = None\n available_buckets = get_available_buckets(bsz)\n if len(available_buckets) != 0:\n # Pick length index at random\n length = random.choice(list(available_buckets.keys()))\n with cache_lock:\n current_idx = length_to_eps[length]['current_idx']\n ep_list = length_to_eps[length]['ep_list']\n num_eps = len(ep_list)\n if num_eps - current_idx >= bsz:\n if batch_cache_type == 'pop':\n batch = ep_list[:bsz]\n length_to_eps[length]['ep_list'] = ep_list[bsz:]\n else:\n batch = ep_list[current_idx: current_idx + bsz]\n length_to_eps[length]['current_idx'] = (current_idx + bsz)\n elif load_complete.value and num_eps > 0:\n if batch_cache_type == 'pop':\n batch = ep_list\n elif num_eps - current_idx > 0:\n batch = ep_list[current_idx:]\n length_to_eps[length]['current_idx'] = num_eps - 1\n length_to_eps[length]['bucket_complete'] = True\n\n if batch is not None:\n if batch_cache_type == 'pop':\n with batches_lock:\n batches.append(batch)\n elif teacher.batch_idx + 1 >= num_batches:\n reset()\n return teacher.batch_idx + 1, batch\n\n return wrapper\n\n\nclass LoaderProcess(Thread):\n \"\"\"A background process that submits jobs to the DataLoader\n to load examples into cache\n \"\"\"\n def __init__(self, opt):\n super().__init__(daemon=True)\n self.dataset = opt['dataset_class'](opt)\n self.bsz = opt.get('batchsize', 1)\n self.num_workers = opt.get('num_workers', 4)\n collate_fn = opt.get('collate_fn', default_collate)\n self.dataloader = 
DataLoader(\n self.dataset,\n batch_size=self.bsz,\n shuffle=False,\n sampler=sampler.SequentialSampler(self.dataset),\n num_workers=self.num_workers,\n collate_fn=collate_fn,\n pin_memory=False,\n drop_last=False,\n )\n self.datatype = opt.get('datatype')\n self.data = enumerate(self.dataloader)\n self.batch_cache_type = opt.get('batch_sort_cache')\n self.batch_length_range = opt.get('batch_length_range')\n\n def run(self):\n while True:\n idx_and_batch = self.load_next()\n if idx_and_batch is None:\n return\n\n @batch_cache\n def load_next(self):\n try:\n return next(self.data)\n except StopIteration:\n return None\n\n\n# Default collate function (for how to prepare a batch)\ndef default_collate(batch):\n new_batch = []\n for b in batch:\n idx = b[0]\n if type(b[1]) is list:\n ep = b[1][0]\n else:\n ep = b[1]\n new_batch.append((idx, ep))\n return new_batch\n\n\nclass StreamDataset(Dataset):\n \"\"\"A Pytorch Dataset utilizing streaming\"\"\"\n def __init__(self, opt):\n self.opt = opt\n self.datatype = opt.get('datatype')\n self.datafile = build_data(self.opt)\n self.data_gen = self._data_generator(self.datafile)\n self.length_datafile = self.datafile + \".length\"\n self.num_epochs = self.opt.get('num_epochs', 0)\n self.training = self.datatype.startswith('train')\n self._load_lens()\n\n def __getitem__(self, index):\n while True:\n index %= self.num_episodes()\n idx, ep = next(self.data_gen)\n if idx == index:\n return (index, ep)\n\n def __len__(self):\n num_epochs = self.num_epochs if self.num_epochs > 0 else 1000\n num_iters = num_epochs if self.training else 1\n return int(num_iters * self.num_episodes())\n\n def _load_lens(self):\n with open(self.length_datafile) as length:\n lengths = json.load(length)\n self.num_eps = lengths['num_eps']\n self.num_exs = lengths['num_exs']\n\n def _data_generator(self, datafile):\n while True:\n for idx, episode in self._read_episode(self.datafile):\n yield idx, episode\n\n def _read_episode(self, datafile):\n read = open(datafile)\n episode = []\n for idx, line in enumerate(read):\n example = json.loads(line)\n episode.append(example)\n if example['episode_done']:\n yield idx, episode\n episode = []\n read.close()\n\n def num_episodes(self):\n return self.num_eps\n\n def num_examples(self):\n return self.num_exs\n\n\nclass PytorchDataTeacher(FixedDialogTeacher):\n\n def __init__(self, opt, shared=None):\n opt['batch_sort'] = False\n super().__init__(opt, shared)\n self.use_batch_act = self.bsz > 1\n self.num_workers = opt['numworkers']\n self.batch_cache_type = opt.get('batch_sort_cache')\n # One can specify a collate function to use for preparing a batch\n self.opt = copy.deepcopy(opt)\n self.is_shared = shared is not None\n dataset_class, self.collate_fn = self.get_dataset_class(opt)\n opt['dataset_class'] = dataset_class\n opt['collate_fn'] = self.collate_fn\n\n if not shared:\n self.dataset = dataset_class(opt)\n if self.datatype == 'train' and not isinstance(self.dataset, StreamDataset):\n data_sampler = sampler.RandomSampler(self.dataset)\n else:\n data_sampler = sampler.SequentialSampler(self.dataset)\n pin_memory = not isinstance(self.dataset, StreamDataset)\n self.pytorch_dataloader = DataLoader(\n self.dataset,\n batch_size=self.bsz,\n shuffle=False,\n sampler=data_sampler,\n num_workers=self.num_workers,\n collate_fn=self.collate_fn,\n pin_memory=pin_memory,\n drop_last=False,\n )\n self.lastYs = [None] * self.bsz\n if self.batch_cache_type != 'none':\n self.loader_process = LoaderProcess(opt)\n self.loader_process.start()\n 
self.data = enumerate(self.pytorch_dataloader)\n else:\n self.dataset = shared['dataset']\n self.pytorch_dataloader = shared['pytorch_dataloader']\n self.lastYs = shared['lastYs']\n self.data = shared['data']\n\n self.num_batches = math.ceil(self.dataset.num_episodes()/self.bsz)\n self.reset()\n\n def get_dataset_class(self, opt):\n \"\"\" To use a custom dataset (as opposed to the StreamDataset above),\n you can subclass the pytorch Dataset class and specify its\n location on the command line.\n\n For example, the VQA v1 task provides a custom dataset, which can\n be specified on the command line as follows:\n ``-pytd vqa_v1:VQADataset``\n\n Note that if the dataset is named ``DefaultDataset``, then you do\n not need to specify its name following the colon; e.g., it\n would just be:\n ``-pytd vqa_v1``\n \"\"\"\n dataset_name = opt.get('pytorch_teacher_dataset')\n if not dataset_name:\n return StreamDataset, default_collate\n sp = dataset_name.strip()\n repo = 'parlai'\n if sp.startswith('internal:'):\n # To switch to local repo, useful for non-public projects\n # (make a directory called 'parlai_internal' with your private agents)\n repo = 'parlai_internal'\n sp = sp[9:]\n sp = sp.split(':')\n if '.' in sp[0]:\n module_name = sp[0]\n else:\n dataset = sp[0].lower()\n module_name = '{}.tasks.{}.agents'.format(repo, dataset)\n if len(sp) > 1:\n sp[1] = sp[1][0].upper() + sp[1][1:]\n dataset = sp[1]\n if '.' not in sp[0] and 'Dataset' not in dataset:\n # Reformat from underscore to CamelCase and append \"Dataset\" to\n # class name by default if a complete path is not given.\n words = dataset.split('_')\n teacher_name = ''\n for w in words:\n teacher_name += (w[0].upper() + w[1:])\n dataset = teacher_name + 'Dataset'\n else:\n dataset = 'DefaultDataset'\n my_module = importlib.import_module(module_name)\n dataset_class = getattr(my_module, dataset)\n\n collate = default_collate\n if hasattr(dataset_class, 'collate'):\n collate = dataset_class.collate\n elif opt.get('model', False):\n agent_class = get_agent_module(opt.get('model'))\n if hasattr(agent_class, 'collate'):\n collate = agent_class.collate\n return dataset_class, collate\n\n def reset(self):\n \"\"\"Reset the dialog so that it is at the start of the epoch,\n and all metrics are reset.\n \"\"\"\n super().reset()\n self.reset_data()\n\n def reset_data(self):\n if not self.training and not self.is_shared:\n self.data = enumerate(self.pytorch_dataloader)\n self.lastY = None\n self.epochDone = False\n self.episode = None\n self.episode_done = True\n self.episode_idx = 0\n self.batch_idx = 0\n\n def share(self):\n shared = super().share()\n shared['pytorch_dataloader'] = self.pytorch_dataloader\n shared['dataset'] = self.dataset\n shared['data'] = self.data\n return shared\n\n def next_example(self):\n if self.epochDone:\n if not self.training:\n return {'episode_done': True, 'id': self.getID()}, True\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n if self.episode_done:\n try:\n self.episode_idx, self.episode = next(self.data)\n self.entry_idx = 0\n epoch_done = False\n except StopIteration:\n ex = {'episode_done': True, 'id': self.getID()}\n epoch_done = True\n else:\n self.entry_idx += 1\n\n if not epoch_done:\n if self.collate_fn == default_collate:\n self.episode[self.entry_idx] = self.episode[self.entry_idx][1]\n ex = self.episode[self.entry_idx]\n self.episode_done = ex['episode_done']\n if (self.episode_done\n and self.episode_idx + self.bsz >= self.num_episodes()):\n epoch_done = True\n return ex, 
epoch_done\n\n @batch_cache\n def get_next_batch(self):\n # employs a cache to see if there is a batch of equal size ready\n return next(self.data)\n\n def next_batch(self):\n if self.epochDone:\n if not self.training:\n return [{'episode_done': True, 'id': self.getID()}] * self.bsz\n else:\n # Reset the data because it is streaming data\n self.reset_data()\n try:\n self.batch_idx, batch = self.get_next_batch()\n if self.collate_fn == default_collate:\n batch = [b[1] for b in batch]\n epoch_done = False\n except StopIteration:\n batch = [{'episode_done': True, 'id': self.getID()}] * self.bsz\n epoch_done = True\n if not epoch_done and self.batch_idx == self.num_batches:\n epoch_done = True\n self.epochDone = epoch_done\n return batch\n\n def num_episodes(self):\n \"\"\"Get the number of episodes in this dataset.\"\"\"\n return self.dataset.num_episodes()\n\n def num_examples(self):\n \"\"\"Get the total number of examples in this dataset.\"\"\"\n return self.dataset.num_examples()\n\n def act(self):\n \"\"\"Send new dialog message.\"\"\"\n action = super().act()\n self.lastY = action.get('labels', action.get('eval_labels', None))\n return action\n\nclass DefaultTeacher(PytorchDataTeacher):\n pass\n"
]
| [
[
"torch.utils.data.sampler.RandomSampler",
"torch.multiprocessing.Lock",
"torch.multiprocessing.Value",
"torch.utils.data.sampler.SequentialSampler",
"torch.utils.data.DataLoader"
]
]
|
Mohamed-Hamdy/Brain-Tumor-segmentation | [
"183c7b4047f0270ec2b50ec2c17e855e1ce08ac9"
]
| [
"Code/GUI.py"
]
| [
"\r\nimport sys\r\n\r\nimport imageio\r\nimport nibabel as nib\r\nimport PIL\r\nfrom tkinter import filedialog\r\nimport PIL.Image, PIL.ImageTk\r\nfrom glob import glob\r\n\r\nsys.path.append('..')\r\n\r\ntry:\r\n import Tkinter as tk\r\nexcept ImportError:\r\n import tkinter as tk\r\n\r\ntry:\r\n import ttk\r\n\r\n py3 = False\r\nexcept ImportError:\r\n import tkinter.ttk as ttk\r\n\r\n py3 = True\r\n\r\nimport numpy as np\r\nimport ui.DeepBrainSegUI_support\r\nfrom ui.helpers import *\r\nimport predict\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport cv2\r\nfrom PIL import Image\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nfrom itertools import count\r\n\r\n#from DeepBrainSeg import deepSeg\r\n\r\n#get_brainsegmentation = deepSeg(quick=False)\r\n\r\n\r\ndef vp_start_gui():\r\n '''Starting point when module is the main routine.'''\r\n global val, w, root\r\n root = tk.Tk()\r\n ui.DeepBrainSegUI_support.set_Tk_var()\r\n top = DeepBrainSegUI(root)\r\n ui.DeepBrainSegUI_support.init(root, top)\r\n root.mainloop()\r\n\r\n\r\nw = None\r\n\r\n\r\ndef create_DeepBrainSegUI(root, *args, **kwargs):\r\n '''Starting point when module is imported by another program.'''\r\n global w, w_win, rt\r\n rt = root\r\n w = tk.Toplevel(root)\r\n ui.DeepBrainSegUI_support.set_Tk_var()\r\n top = DeepBrainSegUI(w)\r\n ui.DeepBrainSegUI_support.init(w, top, *args, **kwargs)\r\n return (w, top)\r\n\r\n\r\ndef destroy_DeepBrainSegUI():\r\n global w\r\n w.destroy()\r\n w = None\r\n\r\n\r\ndef plot_normalize(img):\r\n img = 255. * ((img - img.min()) / (img.max() - img.min()))\r\n return np.uint8(img)\r\n\r\n\r\ndef create_img(img):\r\n return np.dstack((img, img, img))\r\n\r\n\r\ndef create_mask(pred):\r\n return_img = np.zeros((pred.shape[0], pred.shape[1], 3))\r\n print(np.unique(pred))\r\n x, y = np.where(pred == 1)\r\n return_img[x, y, :] = [255, 0, 0]\r\n x, y = np.where(pred == 2)\r\n return_img[x, y, :] = [0, 255, 0]\r\n x, y = np.where(pred == 4)\r\n return_img[x, y, :] = [0, 0, 255]\r\n return np.uint8(return_img)\r\n\r\n\r\nclass ImageLabel(tk.Label):\r\n \"\"\"a label that displays images, and plays them if they are gifs\"\"\"\r\n def load(self, im):\r\n if isinstance(im, str):\r\n im = Image.open(im)\r\n self.loc = 0\r\n self.frames = []\r\n\r\n try:\r\n for i in count(1):\r\n self.frames.append(ImageTk.PhotoImage(im.copy()))\r\n im.seek(i)\r\n except EOFError:\r\n pass\r\n\r\n try:\r\n self.delay = im.info['duration']\r\n except:\r\n self.delay = 100\r\n\r\n if len(self.frames) == 1:\r\n self.config(image=self.frames[0])\r\n else:\r\n self.next_frame()\r\n return im\r\n\r\n def unload(self):\r\n self.config(image=\"\")\r\n self.frames = None\r\n\r\n def next_frame(self):\r\n if self.frames:\r\n self.loc += 1\r\n self.loc %= len(self.frames)\r\n self.config(image=self.frames[self.loc])\r\n self.after(self.delay, self.next_frame)\r\n\r\n\r\nclass DeepBrainSegUI:\r\n def __init__(self, top=None):\r\n '''This class configures and populates the toplevel window.\r\n top is the toplevel containing window.'''\r\n _bgcolor = '#d9d9d9' # X11 color: 'gray85'\r\n _fgcolor = '#000000' # X11 color: 'black'\r\n _compcolor = '#d9d9d9' # X11 color: 'gray85'\r\n _ana1color = '#d9d9d9' # X11 color: 'gray85'\r\n _ana2color = '#ececec' # Closest X11 color: 'gray92'\r\n self.style = ttk.Style()\r\n if sys.platform == \"win32\":\r\n self.style.theme_use('winnative')\r\n self.style.configure('.', background=_bgcolor)\r\n self.style.configure('.', foreground=_fgcolor)\r\n self.style.map('.', 
background=\r\n [('selected', _compcolor), ('active', _ana2color)])\r\n\r\n top.geometry(\"1385x947+327+65\")\r\n top.title(\"Brain Tumor Detection\")\r\n top.configure(highlightcolor=\"black\")\r\n self.progress_bar = 0\r\n\r\n self.Frame1 = tk.Frame(top)\r\n self.Frame1.place(relx=0.007, rely=0.137, relheight=0.85, relwidth=0.986)\r\n\r\n self.Frame1.configure(relief='groove')\r\n self.Frame1.configure(borderwidth=\"2\")\r\n self.Frame1.configure(relief=\"groove\")\r\n\r\n self.Frame2 = tk.Frame(self.Frame1)\r\n self.Frame2.place(relx=0.007, rely=0.0, relheight=0.602, relwidth=0.985)\r\n self.Frame2.configure(relief='groove')\r\n self.Frame2.configure(borderwidth=\"2\")\r\n self.Frame2.configure(relief=\"groove\")\r\n\r\n self.Frame9 = tk.Frame(self.Frame2)\r\n self.Frame9.place(relx=0.007, rely=0.021, relheight=0.918\r\n , relwidth=0.985)\r\n self.Frame9.configure(relief='groove')\r\n self.Frame9.configure(borderwidth=\"2\")\r\n self.Frame9.configure(relief=\"groove\")\r\n\r\n self.Frame10 = tk.Frame(self.Frame9)\r\n self.Frame10.place(relx=0.008, rely=0.022, relheight=0.955\r\n , relwidth=0.321)\r\n self.Frame10.configure(relief='groove')\r\n self.Frame10.configure(borderwidth=\"2\")\r\n self.Frame10.configure(relief=\"groove\")\r\n\r\n self.AxialCanvas = tk.Canvas(self.Frame10)\r\n self.AxialCanvas.place(relx=0.024, rely=0.024, relheight=0.944\r\n , relwidth=0.873)\r\n self.AxialCanvas.configure(borderwidth=\"2\")\r\n self.AxialCanvas.configure(relief=\"ridge\")\r\n self.AxialCanvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.Frame11 = tk.Frame(self.Frame9)\r\n self.Frame11.place(relx=0.34, rely=0.022, relheight=0.955\r\n , relwidth=0.321)\r\n self.Frame11.configure(relief='groove')\r\n self.Frame11.configure(borderwidth=\"2\")\r\n self.Frame11.configure(relief=\"groove\")\r\n\r\n self.SagitalCanvas = tk.Canvas(self.Frame11)\r\n self.SagitalCanvas.place(relx=0.024, rely=0.024, relheight=0.944\r\n , relwidth=0.873)\r\n self.SagitalCanvas.configure(borderwidth=\"2\")\r\n self.SagitalCanvas.configure(relief=\"ridge\")\r\n self.SagitalCanvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.Frame12 = tk.Frame(self.Frame9)\r\n self.Frame12.place(relx=0.672, rely=0.022, relheight=0.955\r\n , relwidth=0.321)\r\n self.Frame12.configure(relief='groove')\r\n self.Frame12.configure(borderwidth=\"2\")\r\n self.Frame12.configure(relief=\"groove\")\r\n\r\n self.CorronalCanvas = tk.Canvas(self.Frame12)\r\n self.CorronalCanvas.place(relx=0.024, rely=0.024, relheight=0.944\r\n , relwidth=0.873)\r\n self.CorronalCanvas.configure(borderwidth=\"2\")\r\n self.CorronalCanvas.configure(relief=\"ridge\")\r\n self.CorronalCanvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.LoadFrame = tk.Frame(self.Frame1)\r\n self.LoadFrame.place(relx=0.007, rely=0.621, relheight=0.366\r\n , relwidth=0.985)\r\n self.LoadFrame.configure(relief='groove')\r\n self.LoadFrame.configure(borderwidth=\"2\")\r\n self.LoadFrame.configure(relief=\"groove\")\r\n\r\n self.Flair_Frame = tk.Frame(self.LoadFrame)\r\n self.Flair_Frame.place(relx=0.007, rely=0.034, relheight=0.932\r\n , relwidth=0.175)\r\n self.Flair_Frame.configure(relief='groove')\r\n self.Flair_Frame.configure(borderwidth=\"2\")\r\n self.Flair_Frame.configure(relief=\"groove\")\r\n\r\n self.Button1 = tk.Button(self.Flair_Frame)\r\n self.Button1.place(relx=0.255, rely=0.836, height=35, width=110)\r\n self.Button1.configure(activebackground=\"#f9f9f9\")\r\n self.Button1.configure(command=self.Load_Flair)\r\n self.Button1.configure(compound='center')\r\n 
self.Button1.configure(text='''Load Flair''')\r\n\r\n self.Flair_canvas = tk.Canvas(self.Flair_Frame)\r\n self.Flair_canvas.place(relx=0.043, rely=0.036, relheight=0.767\r\n , relwidth=0.898)\r\n self.Flair_canvas.configure(borderwidth=\"2\")\r\n self.Flair_canvas.configure(relief=\"ridge\")\r\n self.Flair_canvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.T1Frame = tk.Frame(self.LoadFrame)\r\n self.T1Frame.place(relx=0.201, rely=0.034, relheight=0.932\r\n , relwidth=0.182)\r\n self.T1Frame.configure(relief='groove')\r\n self.T1Frame.configure(borderwidth=\"2\")\r\n self.T1Frame.configure(relief=\"groove\")\r\n\r\n self.Button2 = tk.Button(self.T1Frame)\r\n self.Button2.place(relx=0.286, rely=0.836, height=35, width=110)\r\n self.Button2.configure(activebackground=\"#f9f9f9\")\r\n self.Button2.configure(command=self.Load_T1)\r\n self.Button2.configure(compound='center')\r\n self.Button2.configure(text='''Load T1''')\r\n\r\n self.T1_canvas = tk.Canvas(self.T1Frame)\r\n self.T1_canvas.place(relx=0.041, rely=0.036, relheight=0.767\r\n , relwidth=0.902)\r\n self.T1_canvas.configure(borderwidth=\"2\")\r\n self.T1_canvas.configure(relief=\"ridge\")\r\n self.T1_canvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.T1ceFrame = tk.Frame(self.LoadFrame)\r\n self.T1ceFrame.place(relx=0.401, rely=0.034, relheight=0.932\r\n , relwidth=0.19)\r\n self.T1ceFrame.configure(relief='groove')\r\n self.T1ceFrame.configure(borderwidth=\"2\")\r\n self.T1ceFrame.configure(relief=\"groove\")\r\n\r\n self.Button3 = tk.Button(self.T1ceFrame)\r\n self.Button3.place(relx=0.314, rely=0.836, height=35, width=100)\r\n self.Button3.configure(activebackground=\"#f9f9f9\")\r\n self.Button3.configure(command=self.Load_T1ce)\r\n self.Button3.configure(compound='center')\r\n self.Button3.configure(text='''Load T1ce''')\r\n\r\n self.T1ce_canvas = tk.Canvas(self.T1ceFrame)\r\n self.T1ce_canvas.place(relx=0.039, rely=0.036, relheight=0.767\r\n , relwidth=0.906)\r\n self.T1ce_canvas.configure(borderwidth=\"2\")\r\n self.T1ce_canvas.configure(relief=\"ridge\")\r\n self.T1ce_canvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.T2Frame = tk.Frame(self.LoadFrame)\r\n self.T2Frame.place(relx=0.61, rely=0.034, relheight=0.932\r\n , relwidth=0.182)\r\n self.T2Frame.configure(relief='groove')\r\n self.T2Frame.configure(borderwidth=\"2\")\r\n self.T2Frame.configure(relief=\"groove\")\r\n\r\n self.Button4 = tk.Button(self.T2Frame)\r\n self.Button4.place(relx=0.327, rely=0.836, height=35, width=100)\r\n self.Button4.configure(activebackground=\"#f9f9f9\")\r\n self.Button4.configure(command=self.Load_T2)\r\n self.Button4.configure(compound='center')\r\n self.Button4.configure(text='''Load T2''')\r\n\r\n self.T2_canvas = tk.Canvas(self.T2Frame)\r\n self.T2_canvas.place(relx=0.041, rely=0.036, relheight=0.767\r\n , relwidth=0.902)\r\n self.T2_canvas.configure(borderwidth=\"2\")\r\n self.T2_canvas.configure(relief=\"ridge\")\r\n self.T2_canvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.SegFrame = tk.Frame(self.LoadFrame)\r\n self.SegFrame.place(relx=0.81, rely=0.034, relheight=0.932\r\n , relwidth=0.175)\r\n self.SegFrame.configure(relief='groove')\r\n self.SegFrame.configure(borderwidth=\"2\")\r\n self.SegFrame.configure(relief=\"groove\")\r\n\r\n self.Button5 = tk.Button(self.SegFrame)\r\n self.Button5.place(relx=0.213, rely=0.836, height=35, width=160)\r\n self.Button5.configure(activebackground=\"#f9f9f9\")\r\n self.Button5.configure(command=self.Get_Segmentation)\r\n 
self.Button5.configure(compound='center')\r\n self.Button5.configure(text='''Get Segmentation''')\r\n\r\n self.seg_canvas = tk.Canvas(self.SegFrame)\r\n self.seg_canvas.place(relx=0.043, rely=0.036, relheight=0.767\r\n , relwidth=0.898)\r\n self.seg_canvas.configure(borderwidth=\"2\")\r\n self.seg_canvas.configure(relief=\"ridge\")\r\n self.seg_canvas.configure(selectbackground=\"#c4c4c4\")\r\n\r\n self.LogoCanvas = tk.Canvas(top)\r\n self.LogoCanvas.place(relx=0.007, rely=0.011, relheight=0.117\r\n , relwidth=0.983)\r\n self.LogoCanvas.configure(borderwidth=\"2\")\r\n self.LogoCanvas.configure(relief=\"ridge\")\r\n self.LogoCanvas.configure(selectbackground=\"#c4c4c4\")\r\n logo = PIL.Image.open('logo.png')\r\n true_size = logo.size\r\n #size = (int(1885 * 0.983), int(947 * 0.117))\r\n\r\n size = (int(1585 * 0.983), int(947 * 0.117))\r\n\r\n self.logo_image = PIL.ImageTk.PhotoImage(image=logo.resize(size))\r\n self.LogoCanvas.create_image(0, 0, image=self.logo_image, anchor=tk.NW)\r\n\r\n self.slice1 = 0\r\n self.slice2 = 0\r\n self.slice3 = 0\r\n\r\n ########### radio buttons ###############\r\n self.Button6 = tk.Button(self.Frame2)\r\n self.Button6.place(relx=0.097, rely=0.0, height=25, width=91)\r\n self.Button6.configure(command=self.FlairView)\r\n self.Button6.configure(text='''FlairView''')\r\n\r\n self.Button7 = tk.Button(self.Frame2)\r\n self.Button7.place(relx=0.275, rely=0.0, height=25, width=70)\r\n self.Button7.configure(command=self.T1View)\r\n self.Button7.configure(text='''T1View''')\r\n\r\n self.Button8 = tk.Button(self.Frame2)\r\n self.Button8.place(relx=0.454, rely=0.0, height=25, width=84)\r\n self.Button8.configure(command=self.T1ceView)\r\n self.Button8.configure(text='''T1ceView''')\r\n\r\n self.Button9 = tk.Button(self.Frame2)\r\n self.Button9.place(relx=0.647, rely=0.0, height=25, width=70)\r\n self.Button9.configure(command=self.T2View)\r\n self.Button9.configure(text='''T2View''')\r\n\r\n self.Button10 = tk.Button(self.Frame2)\r\n self.Button10.place(relx=0.803, rely=0.0, height=25\r\n , width=161)\r\n self.Button10.configure(command=self.SegmentationOverlay)\r\n self.Button10.configure(text='''SegmentationOverlay''')\r\n\r\n def init_scales(self, vol):\r\n \"\"\"\r\n \"\"\"\r\n self.slice1 = vol.shape[0] // 2\r\n self.slice2 = vol.shape[1] // 2\r\n self.slice3 = vol.shape[2] // 2\r\n x_size, y_size, z_size = vol.shape\r\n\r\n self.Scale1 = tk.Scale(self.Frame10, from_=0.0, to=z_size)\r\n self.Scale1.place(relx=0.871, rely=0.025, relwidth=0.0, relheight=0.942\r\n , width=46, bordermode='ignore')\r\n self.Scale1.configure(activebackground=\"#f9f9f9\")\r\n self.Scale1.configure(command=self.AxialScroll)\r\n self.Scale1.configure(length=\"368\")\r\n self.Scale1.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.Scale2 = tk.Scale(self.Frame11, from_=0.0, to=y_size)\r\n self.Scale2.place(relx=0.871, rely=0.025, relwidth=0.0, relheight=0.942\r\n , width=46, bordermode='ignore')\r\n self.Scale2.configure(activebackground=\"#f9f9f9\")\r\n self.Scale2.configure(command=self.SagitalScroll)\r\n self.Scale2.configure(digits=\"50\")\r\n self.Scale2.configure(length=\"368\")\r\n self.Scale2.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.Scale3 = tk.Scale(self.Frame12, from_=0.0, to=x_size)\r\n self.Scale3.place(relx=0.871, rely=0.025, relwidth=0.0, relheight=0.942\r\n , width=46, bordermode='ignore')\r\n self.Scale3.configure(activebackground=\"#f9f9f9\")\r\n self.Scale3.configure(command=self.CorronalScroll)\r\n self.Scale3.configure(length=\"368\")\r\n 
self.Scale3.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.TProgressbar1 = ttk.Progressbar(self.Frame2)\r\n self.TProgressbar1.place(relx=0.007, rely=0.948, relwidth=0.981\r\n , relheight=0.0, height=19)\r\n self.TProgressbar1.configure(variable=self.progress_bar)\r\n\r\n self.overlay_flag = False\r\n\r\n # =================================================================\r\n\r\n def update_main_view(self, vol, slice1, slice2, slice3):\r\n \"\"\"\r\n \"\"\"\r\n self.main_vol = vol\r\n true_size = vol.shape[:2]\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.AxialCanvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(vol[:, :, slice3].T)).resize(size))\r\n self.AxialCanvas.create_image(0, 0, image=self.AxialCanvas_image, anchor=tk.NW)\r\n\r\n true_size = (vol.shape[0], vol.shape[2])\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.SagitalCanvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(np.flipud(vol[:, slice2, :].T))).resize(size))\r\n self.SagitalCanvas.create_image(0, 0, image=self.SagitalCanvas_image, anchor=tk.NW)\r\n\r\n true_size = (vol.shape[1], vol.shape[2])\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.CorronalCanvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(np.flipud(vol[slice1, :, :].T))).resize(size))\r\n self.CorronalCanvas.create_image(0, 0, image=self.CorronalCanvas_image, anchor=tk.NW)\r\n\r\n\r\n def update_main_view_overlay(self, vol, prediction, slice1, slice2, slice3, alpha_val=0.5):\r\n \"\"\"\r\n \"\"\"\r\n self.main_vol = vol\r\n pred = prediction[:, :, slice3].T\r\n alpha = np.zeros_like(pred).astype(\"float\")\r\n alpha[pred > 0] = alpha_val\r\n alpha = alpha[..., None]\r\n print(np.unique(alpha))\r\n\r\n true_size = vol.shape[:2]\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n img = (1 - alpha) * plot_normalize(create_img(vol[:, :, slice3].T)) + alpha * create_mask(pred)\r\n self.AxialCanvas_image = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(plot_normalize(img)).resize(size))\r\n self.AxialCanvas.create_image(0, 0, image=self.AxialCanvas_image, anchor=tk.NW)\r\n\r\n pred = np.flipud(prediction[:, slice2, :].T)\r\n alpha = np.zeros_like(pred).astype(\"float\")\r\n alpha[pred > 0] = alpha_val\r\n alpha = alpha[..., None]\r\n true_size = (vol.shape[0], vol.shape[2])\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n img = (1 - alpha) * plot_normalize(create_img(np.flipud(vol[:, slice2, :].T))) + alpha * create_mask(pred)\r\n self.SagitalCanvas_image = 
PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(plot_normalize(img)).resize(size))\r\n self.SagitalCanvas.create_image(0, 0, image=self.SagitalCanvas_image, anchor=tk.NW)\r\n\r\n pred = np.flipud(prediction[slice1, :, :].T)\r\n alpha = np.zeros_like(pred).astype(\"float\")\r\n alpha[pred > 0] = alpha_val\r\n alpha = alpha[..., None]\r\n true_size = (vol.shape[1], vol.shape[2])\r\n size = (self.AxialCanvas.winfo_width(), self.AxialCanvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n img = (1 - alpha) * plot_normalize(create_img(np.flipud(vol[slice1, :, :].T))) + alpha * create_mask(pred)\r\n self.CorronalCanvas_image = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(plot_normalize(img)).resize(size))\r\n self.CorronalCanvas.create_image(0, 0, image=self.CorronalCanvas_image, anchor=tk.NW)\r\n\r\n # =================================================================\r\n\r\n def T1View(self):\r\n print(\"T1View\")\r\n self.main_vol = self.T1_vol\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def T2View(self):\r\n print(\"T2 view\")\r\n self.main_vol = self.T2_vol\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def T1ceView(self):\r\n print(\"T1ce view\")\r\n self.main_vol = self.T1ce_vol\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def FlairView(self):\r\n print(\"Flair View\")\r\n self.main_vol = self.Flair_vol\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def SegmentationView(self):\r\n print(\"segmentation view\")\r\n self.main_vol = self.prediction\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def SegmentationOverlay(self):\r\n print(\"overlay view\")\r\n self.overlay_flag = True\r\n self.update_main_view_overlay(self.main_vol, self.prediction, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n # =================================================================\r\n\r\n def AxialScroll(self, *args):\r\n print('AxialScroll', int(args[0]))\r\n self.slice3 = max(0, int(args[0]) - 1)\r\n print(self.overlay_flag)\r\n if not self.overlay_flag:\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n else:\r\n self.update_main_view_overlay(self.main_vol, self.prediction, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def SagitalScroll(self, *args):\r\n print('SagitalScroll', int(args[0]))\r\n self.slice2 = max(0, int(args[0]) - 1)\r\n if not self.overlay_flag:\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n else:\r\n self.update_main_view_overlay(self.main_vol, self.prediction, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n def CorronalScroll(self, *args):\r\n print('CorronalScroll', int(args[0]))\r\n self.slice1 = max(0, int(args[0]) - 1)\r\n if not self.overlay_flag:\r\n self.update_main_view(self.main_vol, self.slice1, self.slice2, self.slice3)\r\n else:\r\n self.update_main_view_overlay(self.main_vol, self.prediction, self.slice1, self.slice2, self.slice3)\r\n pass\r\n\r\n # =================================================================\r\n\r\n def Load_T2(self, event=None):\r\n \"\"\"\r\n \"\"\"\r\n self.T2filename = filedialog.askopenfilename()\r\n nib_vol = nib.load(self.T2filename)\r\n self.affine = nib_vol.affine\r\n 
self.T2_vol = nib_vol.get_data()\r\n\r\n mid_slice = self.T2_vol.shape[2] // 2\r\n\r\n true_size = self.T2_vol.shape[:2]\r\n size = (self.T2_canvas.winfo_width(), self.T2_canvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.T2_canvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(self.T2_vol[:, :, mid_slice].T)).resize(size))\r\n self.T2_canvas.create_image(0, 0, image=self.T2_canvas_image, anchor=tk.NW)\r\n self.update_main_view(self.T2_vol, self.T2_vol.shape[0] // 2, self.T2_vol.shape[1] // 2,\r\n self.T2_vol.shape[2] // 2)\r\n self.init_scales(self.T2_vol)\r\n\r\n def Load_T1(self, event=None):\r\n self.T1filename = filedialog.askopenfilename()\r\n nib_vol = nib.load(self.T1filename)\r\n self.affine = nib_vol.affine\r\n self.T1_vol = nib_vol.get_data()\r\n\r\n mid_slice = self.T1_vol.shape[2] // 2\r\n\r\n true_size = self.T1_vol.shape[:2]\r\n size = (self.T1_canvas.winfo_width(), self.T1_canvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.T1_canvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(self.T1_vol[:, :, mid_slice].T)).resize(size))\r\n self.T1_canvas.create_image(0, 0, image=self.T1_canvas_image, anchor=tk.NW)\r\n self.update_main_view(self.T1_vol, self.T1_vol.shape[0] // 2, self.T1_vol.shape[1] // 2,\r\n self.T1_vol.shape[2] // 2)\r\n self.init_scales(self.T1_vol)\r\n\r\n def Load_Flair(self, event=None):\r\n self.Flairfilename = filedialog.askopenfilename()\r\n nib_vol = nib.load(self.Flairfilename)\r\n self.affine = nib_vol.affine\r\n self.Flair_vol = nib_vol.get_data()\r\n\r\n mid_slice = self.Flair_vol.shape[2] // 2\r\n\r\n true_size = self.Flair_vol.shape[:2]\r\n size = (self.Flair_canvas.winfo_width(), self.Flair_canvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.Flair_canvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(self.Flair_vol[:, :, mid_slice].T)).resize(size))\r\n self.Flair_canvas.create_image(0, 0, image=self.Flair_canvas_image, anchor=tk.NW)\r\n self.update_main_view(self.Flair_vol, self.Flair_vol.shape[0] // 2, self.Flair_vol.shape[1] // 2,\r\n self.Flair_vol.shape[2] // 2)\r\n self.init_scales(self.Flair_vol)\r\n\r\n def Load_T1ce(self, event=None):\r\n self.T1cefilename = filedialog.askopenfilename()\r\n nib_vol = nib.load(self.T1cefilename)\r\n self.affine = nib_vol.affine\r\n self.T1ce_vol = nib_vol.get_data()\r\n\r\n mid_slice = self.T1ce_vol.shape[2] // 2\r\n\r\n true_size = self.T1ce_vol.shape[:2]\r\n size = (self.T1ce_canvas.winfo_width(), self.T1ce_canvas.winfo_height())\r\n size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n self.T1ce_canvas_image = PIL.ImageTk.PhotoImage(\r\n image=PIL.Image.fromarray(plot_normalize(self.T1ce_vol[:, :, mid_slice].T)).resize(size))\r\n self.T1ce_canvas.create_image(0, 0, image=self.T1ce_canvas_image, anchor=tk.NW)\r\n self.update_main_view(self.T1ce_vol, self.T1ce_vol.shape[0] // 2, self.T1ce_vol.shape[1] // 2,\r\n self.T1ce_vol.shape[2] // 2)\r\n self.init_scales(self.T1ce_vol)\r\n\r\n # 
=================================================================\r\n\r\n    @staticmethod\r\n    def readGif(filename):\r\n        \"\"\"Read every frame of an animated GIF into a list of numpy arrays.\"\"\"\r\n\r\n        # Check PIL\r\n        if PIL is None:\r\n            raise RuntimeError(\"Need PIL to read animated gif files.\")\r\n\r\n        # Check whether it exists\r\n        if not os.path.isfile(filename):\r\n            raise IOError('File not found: ' + str(filename))\r\n\r\n        # Load file using PIL\r\n        pilIm = PIL.Image.open(filename)\r\n        pilIm.seek(0)\r\n\r\n        # Read all images inside\r\n        ims = []\r\n        try:\r\n            while True:\r\n                # Get image as numpy array\r\n                tmp = pilIm.convert() # Make without palette\r\n                a = np.asarray(tmp)\r\n                if len(a.shape) == 0:\r\n                    raise MemoryError(\"Too little memory to convert PIL image to array\")\r\n                # Store, and next\r\n                ims.append(a)\r\n                pilIm.seek(pilIm.tell() + 1)\r\n        except EOFError:\r\n            pass\r\n\r\n        # Done\r\n        return ims\r\n\r\n    def Get_Segmentation(self, event=None):\r\n        # NumPy arrays cannot be chained with `and` after an elementwise `!= None`\r\n        # comparison, so check that every modality has been loaded using `is None`.\r\n        if any(getattr(self, attr, None) is None\r\n               for attr in ('Flair_vol', 'T1_vol', 'T1ce_vol', 'T2_vol')):\r\n            print('Load the Flair, T1, T1ce and T2 volumes before running the segmentation.')\r\n            return\r\n\r\n        # set arguments\r\n        model_to_load = \"./All_dataset_Training_Model.hdf5\"\r\n        # paths for the testing data\r\n        Test_data = glob(os.path.dirname(self.T2filename))\r\n\r\n        np.random.seed(2022)\r\n        np.random.shuffle(Test_data)\r\n        # compile the model\r\n        brain_seg_pred = predict.Prediction(batch_size_test=2, load_model_path=model_to_load)\r\n\r\n        # predicts each volume and save the results in np array\r\n        brain_seg_pred.predict_multiple_volumes(Test_data, save=True, show=False)\r\n\r\n        print(\"End of Prediction\")\r\n\r\n        self.prediction = nib.load('Result/seg.nii.gz').get_data()\r\n        temp = self.prediction.copy()\r\n\r\n        for k in range(155):\r\n            f_dir = 'Result/final_axial_prediction_result/' + str(k) + '.png'\r\n            image = cv2.imread(f_dir, cv2.IMREAD_GRAYSCALE)\r\n            temp[:, :, k] = image.copy()\r\n\r\n        '''\r\n        final_prediction_images_gif = []\r\n        final_gt_images_gif = []\r\n        for k in range(155):\r\n            f_dir = './final_prediction_result/' + str(k) + '.png'\r\n            final_prediction_images_gif.append(imageio.imread(f_dir))\r\n            temp[:, : , k] = final_prediction_images_gif[k].copy()\r\n        '''\r\n\r\n        mid_slice = self.prediction.shape[0] // 2\r\n\r\n        true_size = self.T1ce_vol.shape[:2]\r\n        size = (self.T1ce_canvas.winfo_width(), self.T1ce_canvas.winfo_height())\r\n        size = (size[0], int(true_size[0] / true_size[1]) * size[1]) if size[0] < size[1] else (\r\n            int(true_size[1] / true_size[0]) * size[0], size[1])\r\n\r\n        '''\r\n        lbl = ImageLabel(root)\r\n        lbl.pack()\r\n        img = lbl.load('final_prediction.gif')\r\n        root.mainloop()\r\n        '''\r\n\r\n        self.seg_canvas_image = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(plot_normalize(temp[:, :, 83])).resize(size))\r\n        self.seg_canvas.create_image(0, 0, image=self.seg_canvas_image, anchor=tk.NW)\r\n\r\n        self.update_main_view(self.prediction, self.prediction.shape[0] // 2, self.prediction.shape[1] // 2, self.prediction.shape[2] // 2)\r\n\r\nif __name__ == '__main__':\r\n    vp_start_gui()\r\n"
]
| [
[
"numpy.uint8",
"numpy.zeros_like",
"numpy.asarray",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.flipud",
"numpy.where",
"numpy.dstack",
"numpy.unique"
]
]
|