code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
return 3120.* (self.mass(*args) / (self.radius(*args)**2 * np.sqrt(self.Teff(*args)/5777.)))
def nu_max(self, *args)
Returns asteroseismic nu_max in uHz.

Reference: https://arxiv.org/pdf/1312.3853v1.pdf, Eq. (3)
9.158925
7.779014
1.177389
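A standalone sanity check of the scaling relation above (a sketch, not part of the class): for solar inputs, the expression reduces to its 3120-uHz normalization constant.

import numpy as np

def nu_max_scaling(mass, radius, teff):
    # Same expression as nu_max above; mass and radius in solar units, Teff in K.
    return 3120. * mass / (radius**2 * np.sqrt(teff / 5777.))

print(nu_max_scaling(1.0, 1.0, 5777.))  # -> 3120.0 uHz for the Sun, by construction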
ages = np.arange(self.minage, self.maxage, 0.01)
rs = self.radius(m, ages, feh)
w = np.where(np.isfinite(rs))[0]
return ages[w[0]], ages[w[-1]]
def agerange(self, m, feh=0.0)
For a given mass and feh, returns the min and max allowed ages.
3.262974
2.942643
1.108858
if minage is None:
    minage = self.minage
if maxage is None:
    maxage = self.maxage

ages = np.arange(minage, maxage, dage)

Ms = self.mass(m, ages, feh)
Rs = self.radius(m, ages, feh)
logLs = self.logL(m, ages, feh)
loggs = self.logg(m, ages, feh)
Teffs = self.Teff(m, ages, feh)
mags = {band: self.mag[band](m, ages, feh) for band in self.bands}

props = {'age': ages, 'mass': Ms, 'radius': Rs, 'logL': logLs,
         'logg': loggs, 'Teff': Teffs, 'mag': mags}

if not return_df:
    return props
else:
    d = {}
    for key in props.keys():
        if key == 'mag':
            for m in props['mag'].keys():
                d['{}_mag'.format(m)] = props['mag'][m]
        else:
            d[key] = props[key]
    try:
        df = pd.DataFrame(d)
    except ValueError:
        df = pd.DataFrame(d, index=[0])
    return df
def evtrack(self,m,feh=0.0,minage=None,maxage=None,dage=0.02, return_df=True)
Returns evolution track for a single initial mass and feh.

:param m:
    Initial mass of desired evolution track.

:param feh: (optional)
    Metallicity of desired track.  Default = 0.0 (solar).

:param minage, maxage: (optional)
    Minimum and maximum log(age) of desired track.  Will default
    to min and max age of model isochrones.

:param dage: (optional)
    Spacing in log(age) at which to evaluate models.  Default = 0.02.

:param return_df: (optional)
    Whether to return a ``DataFrame`` or dictionary.  Default is ``True``.

:return:
    Either a :class:`pandas.DataFrame` or dictionary representing the
    evolution track---fixed mass, sampled at the chosen range of ages.
1.96756
2.091261
0.940849
if minm is None:
    minm = self.minmass
if maxm is None:
    maxm = self.maxmass

ms = np.arange(minm, maxm, dm)
ages = np.ones(ms.shape) * age

Ms = self.mass(ms, ages, feh)
Rs = self.radius(ms, ages, feh)
logLs = self.logL(ms, ages, feh)
loggs = self.logg(ms, ages, feh)
Teffs = self.Teff(ms, ages, feh)
mags = {band: self.mag[band](ms, ages, feh) for band in self.bands}

if distance is not None:
    # distance modulus; shadows the mass-spacing ``dm`` parameter,
    # which is no longer needed at this point
    dm = 5 * np.log10(distance) - 5
    for band in mags:
        A = AV * EXTINCTION[band]
        mags[band] = mags[band] + dm + A

props = {'M': Ms, 'R': Rs, 'logL': logLs, 'logg': loggs,
         'Teff': Teffs, 'mag': mags}

if not return_df:
    return props
else:
    d = {}
    for key in props.keys():
        if key == 'mag':
            for m in props['mag'].keys():
                d['{}_mag'.format(m)] = props['mag'][m]
        else:
            d[key] = props[key]
    try:
        df = pd.DataFrame(d)
    except ValueError:
        df = pd.DataFrame(d, index=[0])
    return df
def isochrone(self,age,feh=0.0,minm=None,maxm=None,dm=0.02, return_df=True,distance=None,AV=0.0)
Returns stellar models at constant age and feh, for a range of masses.

:param age:
    log10(age) of desired isochrone.

:param feh: (optional)
    Metallicity of desired isochrone (default = 0.0).

:param minm, maxm: (optional)
    Mass range of desired isochrone (will default to max and min available).

:param dm: (optional)
    Spacing in mass of desired isochrone.  Default = 0.02 Msun.

:param return_df: (optional)
    Whether to return a :class:`pandas.DataFrame` or dictionary.
    Default is ``True``.

:param distance:
    Distance in pc.  If passed, then mags will be converted to
    apparent mags based on distance (and ``AV``).

:param AV:
    V-band extinction (magnitudes).

:return:
    :class:`pandas.DataFrame` or dictionary containing results.
2.24359
2.309316
0.971539
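A hedged usage sketch for evtrack and isochrone above. The get_ichrone function and the 'mist' grid name are taken from their use elsewhere in this file; the exact import path is an assumption and may differ between versions.

from isochrones import get_ichrone  # assumed import path

ic = get_ichrone('mist')
track = ic.evtrack(1.0, feh=0.0)   # DataFrame: 1-Msun track over allowed ages
iso = ic.isochrone(9.7, feh=0.0)   # DataFrame: models at log10(age) = 9.7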
if minmass is None:
    minmass = self.minmass
if maxmass is None:
    maxmass = self.maxmass
if minage is None:
    minage = self.minage
if maxage is None:
    maxage = self.maxage
if minfeh is None:
    minfeh = self.minfeh
if maxfeh is None:
    maxfeh = self.maxfeh

ms = rand.uniform(minmass, maxmass, size=n)
ages = rand.uniform(minage, maxage, size=n)
# Was rand.uniform(minage, maxage, size=n) -- a copy-paste bug;
# feh values should be drawn from the feh range.
fehs = rand.uniform(minfeh, maxfeh, size=n)

Rs = self.radius(ms, ages, fehs)
bad = np.isnan(Rs)
nbad = bad.sum()
while nbad > 0:
    ms[bad] = rand.uniform(minmass, maxmass, size=nbad)
    ages[bad] = rand.uniform(minage, maxage, size=nbad)
    fehs[bad] = rand.uniform(minfeh, maxfeh, size=nbad)
    Rs = self.radius(ms, ages, fehs)
    bad = np.isnan(Rs)
    nbad = bad.sum()

return ms, ages, fehs
def random_points(self,n,minmass=None,maxmass=None, minage=None,maxage=None, minfeh=None,maxfeh=None)
Returns n random mass, age, feh points, none of which are out of range.

:param n:
    Number of desired points.

:param minmass, maxmass: (optional)
    Desired allowed range.  Default is mass range of ``self``.

:param minage, maxage: (optional)
    Desired allowed range.  Default is log10(age) range of ``self``.

:param minfeh, maxfeh: (optional)
    Desired allowed range.  Default is feh range of ``self``.

:return:
    :class:`np.ndarray` arrays of randomly selected mass, log10(age),
    and feh values within allowed ranges.  Used, e.g., to initialize
    random walkers for :class:`StarModel` fits.

.. todo::
    Should change this to drawing from priors!  Current implementation
    is a bit outdated.
1.427784
1.467264
0.973093
m = re.search('([a-zA-Z0-9]+)(_\d+)?', kw)
if m:
    if m.group(1) in cls._not_a_band:
        return None
    else:
        return m.group(1)
def _parse_band(cls, kw)
Returns photometric band from inifile keyword
3.367295
2.90276
1.160032
logging.debug('Building ObservationTree...')
tree = ObservationTree()
for k, v in kwargs.items():
    if k in self.ic.bands:
        if np.size(v) != 2:
            logging.warning('{}={} ignored.'.format(k, v))
            # continue
            v = [v, np.nan]
        o = Observation('', k, 99)  # bogus resolution=99
        s = Source(v[0], v[1])
        o.add_source(s)
        logging.debug('Adding {} ({})'.format(s, o))
        tree.add_observation(o)
self.obs = tree
def _build_obs(self, **kwargs)
Builds ObservationTree out of keyword arguments.

Ignores anything that is not a photometric bandpass.
This should not be used if there are multiple stars observed.

Creates self.obs.
5.806506
4.679349
1.240879
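A hedged sketch of how _build_obs gets exercised: constructing a StarModel from keyword photometry, with (mag, uncertainty) pairs per the docstring above. The import path, constructor pattern, and values are all assumptions.

from isochrones import StarModel, get_ichrone  # assumed import path

mist = get_ichrone('mist')
# Hypothetical measurements; each band keyword becomes one Observation.
mod = StarModel(mist, J=(9.82, 0.02), K=(9.45, 0.02), parallax=(12.3, 0.5))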
for k, v in kwargs.items():
    if k == 'parallax':
        self.obs.add_parallax(v)
    elif k in ['Teff', 'logg', 'feh', 'density']:
        par = {k: v}
        self.obs.add_spectroscopy(**par)
    elif re.search('_', k):
        m = re.search('^(\w+)_(\w+)$', k)
        prop = m.group(1)
        tag = m.group(2)
        self.obs.add_spectroscopy(**{prop: v, 'label': '0_{}'.format(tag)})
def _add_properties(self, **kwargs)
Adds non-photometry properties to ObservationTree
3.511225
3.296952
1.064991
if not hasattr(self, '_mnest_basename'):
    s = self.labelstring
    if s == '0_0':
        s = 'single'
    elif s == '0_0-0_1':
        s = 'binary'
    elif s == '0_0-0_1-0_2':
        s = 'triple'
    s = '{}-{}'.format(self.ic.name, s)
    self._mnest_basename = os.path.join('chains', s + '-')
if os.path.isabs(self._mnest_basename):
    return self._mnest_basename
else:
    return os.path.join(self.directory, self._mnest_basename)
def mnest_basename(self)
Full path to basename
3.171166
3.134463
1.011709
if basename is not None:  # Should this even be allowed?
    self.mnest_basename = basename

basename = self.mnest_basename
if verbose:
    logging.info('MultiNest basename: {}'.format(basename))

folder = os.path.abspath(os.path.dirname(basename))
if not os.path.exists(folder):
    os.makedirs(folder)

# If previous fit exists, see if it's using the same
# observed properties
prop_nomatch = False
propfile = '{}properties.json'.format(basename)

if refit or overwrite:
    files = glob.glob('{}*'.format(basename))
    [os.remove(f) for f in files]

short_basename = self._mnest_basename

mnest_kwargs = dict(n_live_points=n_live_points,
                    outputfiles_basename=short_basename, verbose=verbose)
for k, v in kwargs.items():
    mnest_kwargs[k] = v

if test:
    print('pymultinest.run() with the following kwargs: {}'.format(mnest_kwargs))
else:
    wd = os.getcwd()
    os.chdir(os.path.join(folder, '..'))
    pymultinest.run(self.mnest_loglike, self.mnest_prior, self.n_params,
                    **mnest_kwargs)
    os.chdir(wd)
    # with open(propfile, 'w') as f:
    #     json.dump(self.properties, f, indent=2)
    self._make_samples()
def fit_multinest(self, n_live_points=1000, basename=None, verbose=True, refit=False, overwrite=False, test=False, **kwargs)
Fits model using MultiNest, via pymultinest.

:param n_live_points:
    Number of live points to use for MultiNest fit.

:param basename:
    Where the MultiNest-generated files will live.  By default this
    will be in a folder named ``chains`` in the current working
    directory.  Calling this will define a ``_mnest_basename`` attribute
    for this object.

:param verbose:
    Whether you want MultiNest to talk to you.

:param refit, overwrite:
    Set either of these to true if you want to delete the MultiNest
    files associated with the given basename and start over.

:param **kwargs:
    Additional keyword arguments will be passed to :func:`pymultinest.run`.
3.412596
3.296739
1.035143
def fn(p):
    return -self.lnpost(p)

if 'method' not in kwargs:
    kwargs['method'] = 'Nelder-Mead'

# N.B. the passed-in ``p0`` argument is ignored; the optimization
# always starts from this hard-coded guess.
p0 = [0.8, 9.5, 0.0, 200, 0.2]
fit = scipy.optimize.minimize(fn, p0, **kwargs)
return fit
def maxlike(self, p0, **kwargs)
Finds (local) optimum in parameter space.
3.69307
3.425961
1.077966
# clear any saved _samples
if self._samples is not None:
    self._samples = None

npars = self.n_params

if p0 is None:
    p0 = self.emcee_p0(nwalkers)
    if initial_burn:
        sampler = emcee.EnsembleSampler(nwalkers, npars, self.lnpost,
                                        **kwargs)
        # ninitial = 300  # should this be parameter?
        pos, prob, state = sampler.run_mcmc(p0, ninitial)

        # Choose walker with highest final lnprob to seed new one
        i, j = np.unravel_index(sampler.lnprobability.argmax(),
                                sampler.shape)
        p0_best = sampler.chain[i, j, :]
        print("After initial burn, p0={}".format(p0_best))
        p0 = p0_best * (1 + rand.normal(size=p0.shape) * 0.001)
        print(p0)
else:
    p0 = np.array(p0)
    p0 = rand.normal(size=(nwalkers, npars)) * 0.01 + p0.T[None, :]

sampler = emcee.EnsembleSampler(nwalkers, npars, self.lnpost)
pos, prob, state = sampler.run_mcmc(p0, nburn)
sampler.reset()

sampler.run_mcmc(pos, niter, rstate0=state)

self._sampler = sampler
return sampler
def fit_mcmc(self,nwalkers=300,nburn=200,niter=100, p0=None,initial_burn=None, ninitial=50, loglike_kwargs=None, **kwargs)
Fits stellar model using MCMC.

:param nwalkers: (optional)
    Number of walkers to pass to :class:`emcee.EnsembleSampler`.
    Default is 300.

:param nburn: (optional)
    Number of iterations for "burn-in."  Default is 200.

:param niter: (optional)
    Number of for-keeps iterations for MCMC chain.  Default is 100.

:param p0: (optional)
    Initial parameters for emcee.  If not provided, then chains will
    behave according to whether ``initial_burn`` is set.

:param initial_burn: (optional)
    If ``True``, then initialize walkers first with a random
    initialization, then cull the walkers, keeping only those with
    > 15% acceptance rate, then reinitialize sampling.  If ``False``,
    then just do normal burn-in.  Default is ``None``, which will be
    set to ``True`` if fitting for distance (i.e., if there are
    apparent magnitudes as properties of the model), and ``False`` if not.

:param ninitial: (optional)
    Number of iterations to test walkers for acceptance rate before
    re-initializing.

:param loglike_kwargs:
    Any arguments to pass to :func:`StarModel.loglike`, such as
    what priors to use.

:param **kwargs:
    Additional keyword arguments passed to the
    :class:`emcee.EnsembleSampler` constructor.

:return:
    :class:`emcee.EnsembleSampler` object.
2.958719
3.04471
0.971757
if not hasattr(self, 'sampler') and self._samples is None:
    raise AttributeError('Must run MCMC (or load from file) '
                         'before accessing samples')

if self._samples is not None:
    df = self._samples
else:
    self._make_samples()
    df = self._samples

return df
def samples(self)
Dataframe with samples drawn from isochrone according to posterior.

Columns include both the sampling parameters from the MCMC fit
(mass, age, Fe/H, [distance, A_V]), and also evaluation of the
:class:`Isochrone` at each of these sample points---this is how
chains of physical/observable parameters get produced.
5.385777
4.842962
1.112083
samples = self.samples
inds = rand.randint(len(samples), size=int(n))
newsamples = samples.iloc[inds]
newsamples.reset_index(inplace=True)
return newsamples
def random_samples(self, n)
Returns a random sampling of given size from the existing samples.

:param n:
    Number of samples.

:return:
    :class:`pandas.DataFrame` of length ``n`` with random samples.
4.512217
4.86663
0.927175
tot_mags = []
names = []
truths = []
rng = []
for n in self.obs.get_obs_nodes():
    labels = [l.label for l in n.get_model_nodes()]
    band = n.band
    mags = [self.samples['{}_mag_{}'.format(band, l)] for l in labels]
    tot_mag = addmags(*mags)

    if n.relative:
        name = '{} $\Delta${}'.format(n.instrument, n.band)
        ref = n.reference
        if ref is None:
            continue
        ref_labels = [l.label for l in ref.get_model_nodes()]
        ref_mags = [self.samples['{}_mag_{}'.format(band, l)]
                    for l in ref_labels]
        tot_ref_mag = addmags(*ref_mags)
        tot_mags.append(tot_mag - tot_ref_mag)
        truths.append(n.value[0] - ref.value[0])
    else:
        name = '{} {}'.format(n.instrument, n.band)
        tot_mags.append(tot_mag)
        truths.append(n.value[0])

    names.append(name)
    rng.append((min(truths[-1], np.percentile(tot_mags[-1], 0.5)),
                max(truths[-1], np.percentile(tot_mags[-1], 99.5))))

tot_mags = np.array(tot_mags).T

return corner.corner(tot_mags, labels=names, truths=truths,
                     range=rng, **kwargs)
def corner_observed(self, **kwargs)
Makes corner plot for each observed node magnitude
2.64214
2.563676
1.030606
if os.path.exists(filename):
    with pd.HDFStore(filename) as store:
        if path in store:
            if overwrite:
                os.remove(filename)
            elif not append:
                raise IOError('{} in {} exists.  Set either overwrite or append option.'.format(path, filename))

if self.samples is not None:
    self.samples.to_hdf(filename, path + '/samples')
else:
    pd.DataFrame().to_hdf(filename, path + '/samples')

self.obs.save_hdf(filename, path + '/obs', append=True)

with pd.HDFStore(filename) as store:
    attrs = store.get_storer('{}/samples'.format(path)).attrs
    attrs.ic_type = type(self.ic)
    attrs.ic_bands = list(self.ic.bands)
    attrs.use_emcee = self.use_emcee
    if hasattr(self, '_mnest_basename'):
        attrs._mnest_basename = self._mnest_basename

    attrs._bounds = self._bounds
    attrs._priors = self._priors
    attrs.name = self.name
def save_hdf(self, filename, path='', overwrite=False, append=False)
Saves object data to HDF file (only works if MCMC is run).

Samples are saved to /samples location under given path;
the :class:`ObservationTree` is saved to /obs location under
given path.

:param filename:
    Name of file to save to.  Should be .h5 file.

:param path: (optional)
    Path within HDF file structure to save to.

:param overwrite: (optional)
    If ``True``, delete any existing file by the same name
    before writing.

:param append: (optional)
    If ``True``, then if a file exists, then just the path
    within the file will be updated.
3.086534
3.099708
0.99575
if not os.path.exists(filename):
    raise IOError('{} does not exist.'.format(filename))

store = pd.HDFStore(filename)
try:
    samples = store[path + '/samples']
    attrs = store.get_storer(path + '/samples').attrs
except:
    store.close()
    raise

try:
    ic = attrs.ic_type(attrs.ic_bands)
except AttributeError:
    ic = attrs.ic_type

use_emcee = attrs.use_emcee
mnest = True
try:
    basename = attrs._mnest_basename
except AttributeError:
    mnest = False
bounds = attrs._bounds
priors = attrs._priors

if name is None:
    try:
        name = attrs.name
    except:
        name = ''
store.close()

obs = ObservationTree.load_hdf(filename, path + '/obs', ic=ic)

mod = cls(ic, obs=obs, use_emcee=use_emcee, name=name)
mod._samples = samples
if mnest:
    mod._mnest_basename = basename
mod._directory = os.path.dirname(filename)
return mod
def load_hdf(cls, filename, path='', name=None)
A class method to load a saved StarModel from an HDF5 file.

File must have been created by a call to :func:`StarModel.save_hdf`.

:param filename:
    H5 file to load.

:param path: (optional)
    Path within HDF file.

:return:
    :class:`StarModel` object.
3.820513
3.840188
0.994876
grids = {}
df = pd.DataFrame()
for bnd in self.bands:
    s, b = self.get_band(bnd, **self.kwargs)
    logging.debug('loading {} band from {}'.format(b, s))
    if s not in grids:
        grids[s] = self.get_hdf(s)
    if self.common_columns[0] not in df:
        df[list(self.common_columns)] = grids[s][list(self.common_columns)]
    col = grids[s][b]
    n_nan = np.isnan(col).sum()
    if n_nan > 0:
        logging.debug('{} NANs in {} column'.format(n_nan, b))
    df.loc[:, bnd] = col.values  # dunno why it has to be this way; something
                                 # funny with indexing.
return df
def _get_df(self)
Returns stellar model grid with desired bandpasses and with standard column names.

bands must be iterable, and are parsed according to :func:`get_band`.
5.212175
4.505116
1.156946
if not os.path.exists(cls.master_tarball_file):
    cls.download_grids()

with tarfile.open(os.path.join(ISOCHRONES, cls.master_tarball_file)) as tar:
    logging.info('Extracting {}...'.format(cls.master_tarball_file))
    tar.extractall(ISOCHRONES)
def extract_master_tarball(cls)
Unpack tarball of tarballs
2.897122
2.787698
1.039253
df = pd.concat([self.to_df(f) for f in self.get_filenames(phot)])
return df
def df_all(self, phot)
Subclasses may want to sort this
4.768185
4.322183
1.103189
if path is None:
    local_filename = os.path.join(directory, url.split('/')[-1])
else:
    local_filename = path

if os.path.exists(local_filename) and not clobber:
    logging.info('{} exists; not downloading.'.format(local_filename))
    return local_filename

# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
    for chunk in r.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
            # f.flush()  # commented by recommendation from J.F.Sebastian
return local_filename
def download_file(url, path=None, clobber=False)
Thanks to: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py

path : str
    Local path to download to.
1.60246
1.596467
1.003754
tot = 0
uncs = []
for mag in mags:
    try:
        tot += 10**(-0.4 * mag)
    except:
        # mag is a (mag, err) pair rather than a bare magnitude
        m, dm = mag
        f = 10**(-0.4 * m)
        tot += f
        unc = f * (1 - 10**(-0.4 * dm))
        uncs.append(unc)

totmag = -2.5 * np.log10(tot)
if len(uncs) > 0:
    f_unc = np.sqrt(np.array([u**2 for u in uncs]).sum())
    return totmag, -2.5 * np.log10(1 - f_unc / tot)
else:
    return totmag
def addmags(*mags)
mags is either list of magnitudes or list of (mag, err) pairs
2.663997
2.609617
1.020838
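A quick check of addmags above: two equal-brightness stars combine to a magnitude 2.5*log10(2) ≈ 0.753 brighter than either one.

import numpy as np

tot = addmags(10.0, 10.0)       # two m=10 stars, using addmags as defined above
print(tot)                      # ~9.247
print(10 - 2.5 * np.log10(2))   # same value, computed by hand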
r0, pa0 = pos0
ra0 = r0 * np.sin(pa0 * np.pi / 180)
dec0 = r0 * np.cos(pa0 * np.pi / 180)

r1, pa1 = pos1
ra1 = r1 * np.sin(pa1 * np.pi / 180)
dec1 = r1 * np.cos(pa1 * np.pi / 180)

dra = (ra1 - ra0)
ddec = (dec1 - dec0)

return np.sqrt(dra**2 + ddec**2)
def distance(pos0, pos1)
distance between two positions defined by (separation, PA)
1.925552
1.812343
1.062465
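A worked check of distance above: two sources at the same separation r but 90 degrees apart in position angle should be sqrt(2)*r apart.

print(distance((1.0, 0.0), (1.0, 90.0)))   # ~1.41421 = sqrt(2)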
mags = self.get_photometry(brightest=brightest, convert=False)
VT, dVT = mags['VT']
BT, dBT = mags['BT']

if (-0.25 < BT - VT < 2.0):
    (a, b, c, d) = (0.00097, 0.1334, 0.05486, 0.01998)
    V = (VT + a - b * (BT - VT) + c * (BT - VT)**2 - d * (BT - VT)**3)
    dVdVT = 1 + b - 2 * c * (BT - VT) + 3 * d * (BT - VT)**2
    dVdBT = -b + 2 * c * (BT - VT) - 3 * d * (BT - VT)**2
    dV = np.sqrt((dVdVT**2 * dVT**2) + (dVdBT**2 * dBT**2))
else:
    raise ValueError('BT-VT outside of range to convert')

return V, dV
def V(self, brightest=False)
Johnson V (with uncertainty) converted from Tycho BT/VT photometry.

Conversion from: http://www.aerith.net/astro/color_conversion.html
3.705336
3.691318
1.003798
phot = None

# Default to SDSS for these
if b in ['u', 'g', 'r', 'i', 'z']:
    phot = 'SDSS'
    band = 'SDSS_{}'.format(b)
elif b in ['U', 'B', 'V', 'R', 'I']:
    phot = 'UBVRIplus'
    band = 'Bessell_{}'.format(b)
elif b in ['J', 'H', 'Ks']:
    phot = 'UBVRIplus'
    band = '2MASS_{}'.format(b)
elif b == 'K':
    phot = 'UBVRIplus'
    band = '2MASS_Ks'
elif b in ['kep', 'Kepler', 'Kp']:
    phot = 'UBVRIplus'
    band = 'Kepler_Kp'
elif b == 'TESS':
    phot = 'UBVRIplus'
    band = 'TESS'
elif b in ['W1', 'W2', 'W3', 'W4']:
    phot = 'WISE'
    band = 'WISE_{}'.format(b)
elif b in ('G', 'BP', 'RP'):
    phot = 'UBVRIplus'
    band = 'Gaia_{}'.format(b)
    if 'version' in kwargs:
        if kwargs['version'] == '1.1':
            band += '_DR2Rev'
else:
    m = re.match('([a-zA-Z]+)_([a-zA-Z_]+)', b)
    if m:
        if m.group(1) in cls.phot_systems:
            phot = m.group(1)
            if phot == 'PanSTARRS':
                band = 'PS_{}'.format(m.group(2))
            else:
                band = m.group(0)
        elif m.group(1) in ['UK', 'UKIRT']:
            phot = 'UKIDSS'
            band = 'UKIDSS_{}'.format(m.group(2))

if phot is None:
    for system, bands in cls.phot_bands.items():
        if b in bands:
            phot = system
            band = b
            break
    if phot is None:
        raise ValueError('MIST grids cannot resolve band {}!'.format(b))

return phot, band
def get_band(cls, b, **kwargs)
Defines what a "shortcut" band name refers to. Returns phot_system, band
2.68293
2.593271
1.034574
ind = None
for i, c in enumerate(self.children):
    if c.label == label:
        ind = i

if ind is None:
    logging.warning('No child labeled {}.'.format(label))
    return
self.children.pop(ind)

self._clear_all_leaves()
def remove_child(self, label)
Removes node by label
3.608979
3.692495
0.977382
if self.is_leaf:
    return [self] if re.search(name, self.label) else []
else:
    leaves = []
    if re.search(name, self.label):
        for c in self.children:
            leaves += c._get_leaves()  # all leaves
    else:
        for c in self.children:
            leaves += c.select_leaves(name)  # only matching ones
    return leaves
def select_leaves(self, name)
Returns all leaves under all nodes matching name
3.424753
3.13423
1.092693
obs_leaves = []
for n in self:
    if n.is_leaf:
        if isinstance(n, ModelNode):
            l = n.parent
        else:
            l = n
        if l not in obs_leaves:
            obs_leaves.append(l)
return obs_leaves
def get_obs_leaves(self)
Returns the last obs nodes that are leaves
3.472381
3.104983
1.118325
return distance((self.separation, self.pa), (other.separation, other.pa))
def distance(self, other)
Coordinate distance from another ObsNode
7.203133
7.36125
0.97852
if self._Nstars is None:
    N = {}
    for n in self.get_model_nodes():
        if n.index not in N:
            N[n.index] = 1
        else:
            N[n.index] += 1
    self._Nstars = N

return self._Nstars
def Nstars(self)
dictionary of number of stars per system
2.950993
2.766814
1.066567
if type(index) in [list, tuple]:
    if len(index) != N:
        raise ValueError('If a list, index must be of length N.')
else:
    index = [index] * N

for idx in index:
    existing = self.get_system(idx)
    tag = len(existing)
    self.add_child(ModelNode(ic, index=idx, tag=tag))
def add_model(self, ic, N=1, index=0)
Should only be able to do this to a leaf node. Either N and index both integers OR index is list of length=N
4.99947
4.347126
1.150063
if pardict == self._cache_key and use_cache:
    return self._cache_val
self._cache_key = pardict

# Generate appropriate parameter vector from dictionary
p = []
for l in self.leaf_labels:
    p.extend(pardict[l])

assert len(p) == self.n_params

tot = np.inf
for i, m in enumerate(self.leaves):
    mag = m.evaluate(p[i*5:(i+1)*5], self.band)
    tot = addmags(tot, mag)

self._cache_val = tot
return tot
def model_mag(self, pardict, use_cache=True)
pardict is a dictionary of parameters for all leaves gets converted back to traditional parameter vector
4.186612
4.051898
1.033247
mag, dmag = self.value
if np.isnan(dmag):
    return 0
if self.relative:
    # If this *is* the reference, just return
    if self.reference is None:
        return 0
    mod = (self.model_mag(pardict, use_cache=use_cache) -
           self.reference.model_mag(pardict, use_cache=use_cache))
    mag -= self.reference.value[0]
else:
    mod = self.model_mag(pardict, use_cache=use_cache)

lnl = -0.5 * (mag - mod)**2 / dmag**2
return lnl
def lnlike(self, pardict, use_cache=True)
returns log-likelihood of this observation pardict is a dictionary of parameters for all leaves gets converted back to traditional parameter vector
3.462255
3.612713
0.958353
if ic is None:
    ic = get_ichrone('mist')

if len(stars) > 2:
    raise NotImplementedError('No support yet for > 2 synthetic stars')

mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars]

d = stars[0].distance(stars[1])

if d < self.resolution:
    mag = addmags(*mags) + unc * np.random.randn()
    sources = [Source(mag, unc, stars[0].separation, stars[0].pa,
                      relative=self.relative)]
else:
    mags = np.array([m + unc * np.random.randn() for m in mags])
    if self.relative:
        mags -= mags.min()

    sources = [Source(m, unc, s.separation, s.pa, relative=self.relative)
               for m, s in zip(mags, stars)]

for s in sources:
    self.add_source(s)

self._set_reference()
def observe(self, stars, unc, ic=None)
Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
4.110995
3.865771
1.063435
if not type(source) == Source:
    raise TypeError('Can only add Source object.')

if len(self.sources) == 0:
    self.sources.append(source)
else:
    ind = 0
    for s in self.sources:
        # Keep sorted order of separation
        if source.separation < s.separation:
            break
        ind += 1

    self.sources.insert(ind, source)
def add_source(self, source)
Adds source to observation, keeping sorted order (in separation)
3.380056
2.594726
1.302664
tree = cls(**kwargs)

for (n, b), g in df.groupby(['name', 'band']):
    # g.sort('separation', inplace=True)  # ensures that the first is reference
    sources = [Source(**s[['mag', 'e_mag', 'separation', 'pa', 'relative']])
               for _, s in g.iterrows()]
    obs = Observation(n, b, g.resolution.mean(),
                      sources=sources, relative=g.relative.any())
    tree.add_observation(obs)

# For all relative mags, set reference to be brightest

return tree
def from_df(cls, df, **kwargs)
DataFrame must have the right columns.

These are: name, band, resolution, mag, e_mag, separation, pa.
10.485391
7.584102
1.382549
name = []
band = []
resolution = []
mag = []
e_mag = []
separation = []
pa = []
relative = []
for o in self._observations:
    for s in o.sources:
        name.append(o.name)
        band.append(o.band)
        resolution.append(o.resolution)
        mag.append(s.mag)
        e_mag.append(s.e_mag)
        separation.append(s.separation)
        pa.append(s.pa)
        relative.append(s.relative)

return pd.DataFrame({'name': name, 'band': band, 'resolution': resolution,
                     'mag': mag, 'e_mag': e_mag, 'separation': separation,
                     'pa': pa, 'relative': relative})
def to_df(self)
Returns a DataFrame with the photometry from all observations, organized.

This DataFrame should be able to be read back in to reconstruct
the observation.
2.080841
1.995416
1.042811
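to_df and from_df are designed to round-trip; a hedged sketch, with constructor signatures inferred from their use elsewhere in this file:

tree = ObservationTree()
obs = Observation('2MASS', 'J', 4.0)   # name, band, resolution (hypothetical values)
obs.add_source(Source(9.82, 0.02))     # mag, e_mag
tree.add_observation(obs)

df = tree.to_df()
tree2 = ObservationTree.from_df(df)    # should reconstruct the same observations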
if os.path.exists(filename):
    store = pd.HDFStore(filename)
    if path in store:
        store.close()
        if overwrite:
            os.remove(filename)
        elif not append:
            raise IOError('{} in {} exists.  Set either overwrite or append option.'.format(path, filename))
    else:
        store.close()

df = self.to_df()
df.to_hdf(filename, path + '/df')

with pd.HDFStore(filename) as store:
    attrs = store.get_storer(path + '/df').attrs
    attrs.spectroscopy = self.spectroscopy
    attrs.parallax = self.parallax
    attrs.N = self._N
    attrs.index = self._index
def save_hdf(self, filename, path='', overwrite=False, append=False)
Writes all info necessary to recreate object to HDF file.

Saves table of photometry in DataFrame.  Saves model specification,
spectroscopy, and parallax to attrs.
3.008923
2.732792
1.101044
store = pd.HDFStore(filename)
try:
    samples = store[path + '/df']
    attrs = store.get_storer(path + '/df').attrs
except:
    store.close()
    raise

df = store[path + '/df']
new = cls.from_df(df)

if ic is None:
    ic = get_ichrone('mist')

new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax

store.close()
return new
def load_hdf(cls, filename, path='', ic=None)
Loads stored ObservationTree from file.

You can provide the isochrone to use; or it will default to MIST.

TODO: saving and loading must be fixed!  Save ic type, bands, etc.
4.493838
4.456419
1.008397
if len(self._observations) == 0:
    self._observations.append(obs)
else:
    res = obs.resolution
    ind = 0
    for o in self._observations:
        if res > o.resolution:
            break
        ind += 1
    self._observations.insert(ind, obs)

self._build_tree()
self._clear_cache()
def add_observation(self, obs)
Adds an observation to observation list, keeping proper order
3.107959
2.990376
1.039321
if label not in self.leaf_labels:
    raise ValueError('No model node named {} (must be in {}).  Maybe define models first?'.format(label, self.leaf_labels))

for k, v in props.items():
    if k not in self.spec_props:
        raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
    if len(v) != 2:
        raise ValueError('Must provide (value, uncertainty) for {}.'.format(k))

if label not in self.spectroscopy:
    self.spectroscopy[label] = {}

for k, v in props.items():
    self.spectroscopy[label][k] = v

self._clear_cache()
def add_spectroscopy(self, label='0_0', **props)
Adds spectroscopic measurement to particular star(s)
(corresponding to individual model node).

Default ``0_0`` should be the primary star.

Legal inputs are 'Teff', 'logg', 'feh', each in the form (val, err).
3.247815
3.024774
1.073738
if label not in self.leaf_labels:
    raise ValueError('No model node named {} (must be in {}).  Maybe define models first?'.format(label, self.leaf_labels))

for k, v in props.items():
    if k not in self.spec_props:
        raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
    if len(v) != 2:
        raise ValueError('Must provide (min, max) for {}.  (`None` is allowed value)'.format(k))

if label not in self.limits:
    self.limits[label] = {}

for k, v in props.items():
    vmin, vmax = v
    if vmin is None:
        vmin = -np.inf
    if vmax is None:
        vmax = np.inf
    self.limits[label][k] = (vmin, vmax)

self._clear_cache()
def add_limit(self, label='0_0', **props)
Define limits to spectroscopic property of particular stars.

Usually will be used for 'logg', but 'Teff' and 'feh' will also work.

In form (min, max): e.g., t.add_limit(logg=(3.0, None)).
``None`` will be converted to (-)np.inf.
3.264007
3.133533
1.041638
self.clear_models()

if leaves is None:
    leaves = self._get_leaves()
elif type(leaves) == type(''):
    leaves = self.select_leaves(leaves)

# Sort leaves by distance, to ensure system 0 will be assigned
# to the main reference star.

if np.isscalar(N):
    N = (np.ones(len(leaves)) * N)
N = np.array(N).astype(int)

if np.isscalar(index):
    index = (np.ones_like(N) * index)
index = np.array(index).astype(int)

# Add the appropriate number of model nodes to each
# star in the highest-resolution image
for s, n, i in zip(leaves, N, index):
    # Remove any previous model nodes (should do some checks here?)
    s.remove_children()
    s.add_model(ic, n, i)

# For each system, make sure tag _0 is the brightest.
self._fix_labels()

self._N = N
self._index = index

self._clear_all_leaves()
def define_models(self, ic, leaves=None, N=1, index=0)
N, index are either integers or lists of integers.

N : number of model stars per observed star
index : index of physical association

leaves : either a list of leaves, or a pattern by which
the leaves are selected (via :func:`select_leaves`).

If these are lists, then they are defined individually for each leaf.

If ``index`` is a list, then each entry must be either an integer
or a list of length ``N`` (where ``N`` is the corresponding entry
in the ``N`` list.)

This bugs up if you call it multiple times.  If you want to re-do
a call to this function, please re-define the tree.
5.747342
5.357908
1.072684
for s in self.systems:
    mag0 = np.inf
    n0 = None
    for n in self.get_system(s):
        if isinstance(n.parent, DummyObsNode):
            continue
        mag, _ = n.parent.value
        if mag < mag0:
            mag0 = mag
            n0 = n

    # If brightest is not tag _0, then switch them.
    if n0 is not None and n0.tag != 0:
        n_other = self.get_leaf('{}_{}'.format(s, 0))
        n_other.tag = n0.tag
        n0.tag = 0
def _fix_labels(self)
For each system, make sure tag _0 is the brightest, and make sure system 0 contains the brightest star in the highest-resolution image
6.581632
4.819067
1.365748
return [n for n in self.get_obs_nodes() if n.obsname==name]
def select_observations(self, name)
Returns nodes whose instrument-band matches 'name'
7.287307
5.765885
1.263866
# Only allow leaves to stay on list (highest-resolution) level
return

# N.B. everything below is currently disabled by the early return above.
for l in self._levels[-2::-1]:
    for n in l:
        if n.is_leaf:
            n.parent.remove_child(n.label)

self._clear_all_leaves()
def trim(self)
Trims leaves from tree that are not observed at highest-resolution level.

This is a bit hacky; note that the body of this method is currently
disabled by an early ``return``.
13.167135
9.829171
1.339598
d = {}
N = self.Nstars
i = 0
for s in self.systems:
    age, feh, dist, AV = p[i + N[s]: i + N[s] + 4]
    for j in range(N[s]):  # was xrange (Python 2)
        l = '{}_{}'.format(s, j)
        mass = p[i + j]
        d[l] = [mass, age, feh, dist, AV]
    i += N[s] + 4

return d
def p2pardict(self, p)
Given leaf labels, turns parameter vector into pardict
4.331603
4.055231
1.068152
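For illustration, a standalone re-implementation of the slicing logic above: each system contributes N[s] masses followed by shared age, feh, distance, and AV.

def p2pardict_demo(p, N, systems):
    # Mirrors p2pardict above, for plain dict/list inputs.
    d = {}
    i = 0
    for s in systems:
        age, feh, dist, AV = p[i + N[s]: i + N[s] + 4]
        for j in range(N[s]):
            d['{}_{}'.format(s, j)] = [p[i + j], age, feh, dist, AV]
        i += N[s] + 4
    return d

print(p2pardict_demo([1.0, 0.8, 9.6, 0.0, 100.0, 0.1], {0: 2}, [0]))
# {'0_0': [1.0, 9.6, 0.0, 100.0, 0.1], '0_1': [0.8, 9.6, 0.0, 100.0, 0.1]}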
if use_cache and self._cache_key is not None and np.all(p == self._cache_key):
    return self._cache_val
self._cache_key = p

pardict = self.p2pardict(p)

# lnlike from photometry
lnl = 0
for n in self:
    if n is not self:
        lnl += n.lnlike(pardict, use_cache=use_cache)
    if not np.isfinite(lnl):
        self._cache_val = -np.inf
        return -np.inf

# lnlike from spectroscopy
for l in self.spectroscopy:
    for prop, (val, err) in self.spectroscopy[l].items():
        mod = self.get_leaf(l).evaluate(pardict[l], prop)
        lnl += -0.5 * (val - mod)**2 / err**2
    if not np.isfinite(lnl):
        self._cache_val = -np.inf
        return -np.inf

# enforce limits
for l in self.limits:
    for prop, (vmin, vmax) in self.limits[l].items():
        mod = self.get_leaf(l).evaluate(pardict[l], prop)
        if mod < vmin or mod > vmax or not np.isfinite(mod):
            self._cache_val = -np.inf
            return -np.inf

# lnlike from parallax
for s, (val, err) in self.parallax.items():
    dist = pardict['{}_0'.format(s)][3]
    mod = 1. / dist * 1000.
    lnl += -0.5 * (val - mod)**2 / err**2

if not np.isfinite(lnl):
    self._cache_val = -np.inf
    return -np.inf

self._cache_val = lnl
return lnl
def lnlike(self, p, use_cache=True)
takes parameter vector, constructs pardict, returns sum of lnlikes of non-leaf nodes
2.521445
2.410589
1.045987
dmin = np.inf
nclose = None

ds = []
nodes = []

ds.append(np.inf)
nodes.append(self)

for n in self:
    if n is n0:
        continue
    try:
        if n._in_same_observation(n0):
            continue
        ds.append(n.distance(n0))
        nodes.append(n)
    except AttributeError:
        pass

inds = np.argsort(ds)
ds = [ds[i] for i in inds]
nodes = [nodes[i] for i in inds]

for d, n in zip(ds, nodes):
    try:
        if d < n.resolution or n.resolution == -1:
            return n
    except AttributeError:
        pass

# If nothing else works
return self
def _find_closest(self, n0)
returns the node in the tree that is closest to n0, but not in the same observation
3.314106
2.940618
1.12701
if q < qmin or q > 1:
    return 0
C = 1 / (1 / (gamma + 1) * (1 - qmin**(gamma + 1)))
return C * q**gamma
def q_prior(q, m=1, gamma=0.3, qmin=0.1)
Default prior on mass ratio q ~ q^gamma
5.636202
5.650561
0.997459
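The constant C above normalizes q**gamma over [qmin, 1]; a quick quadrature check, using q_prior as defined above:

from scipy.integrate import quad

val, _ = quad(q_prior, 0.1, 1.0)   # default gamma=0.3, qmin=0.1
print(val)                         # ~1.0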
fehdist = (0.8 / 0.15 * np.exp(-0.5 * (feh - 0.016)**2 / 0.15**2) +
           0.2 / 0.22 * np.exp(-0.5 * (feh + 0.15)**2 / 0.22**2))
return fehdist
def local_fehdist(feh)
feh PDF based on local SDSS distribution.

From Jo Bovy:
https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3

2D gaussian fit based on Casagrande (2011)
3.151578
3.335711
0.9448
if not os.path.isabs(ini_file):
    ini_file = os.path.join(folder, ini_file)

config = ConfigObj(ini_file)

kwargs = {}
for kw in config.keys():
    try:
        kwargs[kw] = float(config[kw])
    except:
        kwargs[kw] = (float(config[kw][0]), float(config[kw][1]))

return cls(ic, **kwargs)
def from_ini(cls, ic, folder='.', ini_file='star.ini')
Initialize a StarModel from a .ini file File should contain all arguments with which to initialize StarModel.
2.265012
2.378402
0.952325
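A hedged sketch of the star.ini format from_ini expects: each keyword is either a bare float or a (value, uncertainty) pair, matching the float-vs-tuple parsing above. The names and values here are hypothetical.

# star.ini
Teff = 5770, 80
feh = 0.0, 0.1
J = 9.82, 0.02
parallax = 12.3, 0.5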
remove = []
for p in self.properties.keys():
    if not hasattr(self.ic, p) and \
            p not in self.ic.bands and \
            p not in ['parallax', 'feh', 'age', 'mass_B', 'mass_C'] and \
            not re.search('delta_', p):
        remove.append(p)

for p in remove:
    del self.properties[p]

if len(remove) > 0:
    logging.warning('Properties removed from Model because '
                    'not present in {}: {}'.format(type(self.ic), remove))

remove = []
for p in self.properties.keys():
    try:
        val = self.properties[p][0]
        if not np.isfinite(val):
            remove.append(p)
    except:
        pass

for p in remove:
    del self.properties[p]

if len(remove) > 0:
    logging.warning('Properties removed from Model because '
                    'value is nan or inf: {}'.format(remove))

self._props_cleaned = True
def _clean_props(self)
Makes sure all properties are legit for isochrone. Not done in __init__ in order to save speed on loading.
3.349248
3.207736
1.044116
for kw, val in kwargs.items():  # was iteritems() (Python 2)
    self.properties[kw] = val
def add_props(self,**kwargs)
Adds observable properties to ``self.properties``.
4.420141
3.956718
1.117123
for arg in args:
    if arg in self.properties:
        del self.properties[arg]
def remove_props(self,*args)
Removes desired properties from ``self.properties``.
3.349071
2.589317
1.293419
for prop in self.properties.keys():
    if prop in self.ic.bands:
        return True
return False
def fit_for_distance(self)
``True`` if any of the properties are apparent magnitudes.
13.510265
7.927742
1.704176
if not self._props_cleaned:
    self._clean_props()

if not self.use_emcee:
    fit_for_distance = True
    mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4])
else:
    if len(p) == 5:
        fit_for_distance = True
        mass, age, feh, dist, AV = p
    elif len(p) == 3:
        fit_for_distance = False
        mass, age, feh = p

if mass < self.ic.minmass or mass > self.ic.maxmass \
        or age < self.ic.minage or age > self.ic.maxage \
        or feh < self.ic.minfeh or feh > self.ic.maxfeh:
    return -np.inf

if fit_for_distance:
    if dist < 0 or AV < 0 or dist > self.max_distance:
        return -np.inf
    if AV > self.maxAV:
        return -np.inf

if self.min_logg is not None:
    logg = self.ic.logg(mass, age, feh)
    if logg < self.min_logg:
        return -np.inf

logl = 0
for prop in self.properties.keys():
    try:
        val, err = self.properties[prop]
    except TypeError:
        # property not appropriate for fitting (e.g. no error provided)
        continue
    if prop in self.ic.bands:
        if not fit_for_distance:
            raise ValueError('must fit for mass, age, feh, dist, A_V '
                             'if apparent magnitudes provided.')
        mod = self.ic.mag[prop](mass, age, feh) + 5 * np.log10(dist) - 5
        A = AV * EXTINCTION[prop]
        mod += A
    elif re.search('delta_', prop):
        continue
    elif prop == 'feh':
        mod = feh
    elif prop == 'parallax':
        mod = 1. / dist * 1000
    else:
        mod = getattr(self.ic, prop)(mass, age, feh)

    logl += -(val - mod)**2 / (2 * err**2) + np.log(1 / (err * np.sqrt(2 * np.pi)))

if np.isnan(logl):
    logl = -np.inf

return logl
def lnlike(self, p)
Log-likelihood of model at given parameters.

:param p:
    mass, log10(age), feh, [distance, A_V (extinction)].
    Final two should only be provided if ``self.fit_for_distance``
    is ``True``; that is, apparent magnitudes are provided.

:return:
    log-likelihood.  Will be -np.inf if values out of range.
2.878411
2.669956
1.078074
mass_prior = salpeter_prior(mass)
if mass_prior == 0:
    mass_lnprior = -np.inf
else:
    mass_lnprior = np.log(mass_prior)
if np.isnan(mass_lnprior):
    logging.warning('mass prior is nan at {}'.format(mass))

age_lnprior = np.log(age * (2 / (self.ic.maxage**2 - self.ic.minage**2)))
if np.isnan(age_lnprior):
    logging.warning('age prior is nan at {}'.format(age))

if use_local_fehprior:
    fehdist = local_fehdist(feh)
else:
    fehdist = 1 / (self.ic.maxfeh - self.ic.minfeh)
feh_lnprior = np.log(fehdist)
if np.isnan(feh_lnprior):
    logging.warning('feh prior is nan at {}'.format(feh))

if distance is not None:
    if distance <= 0:
        distance_lnprior = -np.inf
    else:
        distance_lnprior = np.log(3 / self.max_distance**3 * distance**2)
else:
    distance_lnprior = 0
if np.isnan(distance_lnprior):
    logging.warning('distance prior is nan at {}'.format(distance))

if AV is not None:
    AV_lnprior = np.log(1 / self.maxAV)
else:
    AV_lnprior = 0
if np.isnan(AV_lnprior):
    logging.warning('AV prior is nan at {}'.format(AV))

lnprior = (mass_lnprior + age_lnprior + feh_lnprior +
           distance_lnprior + AV_lnprior)
return lnprior
def lnprior(self, mass, age, feh, distance=None, AV=None, use_local_fehprior=True)
log-prior for model parameters
1.95473
1.97399
0.990243
m0, age0, feh0 = self.ic.random_points(nseeds)
d0 = 10**(rand.uniform(0, np.log10(self.max_distance), size=nseeds))
AV0 = rand.uniform(0, self.maxAV, size=nseeds)

costs = np.zeros(nseeds)

if self.fit_for_distance:
    pfits = np.zeros((nseeds, 5))
else:
    pfits = np.zeros((nseeds, 3))

def fn(p):  # fmin is a function *minimizer*
    return -1 * self.lnpost(p)

for i, m, age, feh, d, AV in zip(range(nseeds),
                                 m0, age0, feh0, d0, AV0):
    if self.fit_for_distance:
        pfit = scipy.optimize.fmin(fn, [m, age, feh, d, AV], disp=False)
    else:
        pfit = scipy.optimize.fmin(fn, [m, age, feh], disp=False)
    pfits[i, :] = pfit
    costs[i] = self.lnpost(pfit)

return pfits[np.argmax(costs), :]
def maxlike(self,nseeds=50)
Returns the best-fit parameters, choosing the best of multiple starting guesses.

:param nseeds: (optional)
    Number of starting guesses, uniformly distributed throughout
    allowed ranges.  Default=50.

:return:
    List of best-fit parameters: ``[m, age, feh, [distance, A_V]]``.
    Note that distance and A_V values will be meaningless unless
    magnitudes are present in ``self.properties``.
3.13038
2.880701
1.086673
if self.use_emcee:
    if 'basename' in kwargs:
        del kwargs['basename']
    if 'verbose' in kwargs:
        del kwargs['verbose']
    if 'overwrite' in kwargs:
        del kwargs['overwrite']
    self.fit_mcmc(**kwargs)
else:
    self.fit_multinest(**kwargs)
def fit(self, **kwargs)
Wrapper for either :func:`fit_multinest` or :func:`fit_mcmc`. Default will be to use MultiNest; set `use_emcee` keyword to `True` if you want to use MCMC, or just call :func:`fit_mcmc` directly.
3.025183
2.29738
1.316797
folder = os.path.abspath(os.path.dirname(basename))
if not os.path.exists(folder):
    os.makedirs(folder)

# If previous fit exists, see if it's using the same
# observed properties
prop_nomatch = False
propfile = '{}properties.json'.format(basename)
if os.path.exists(propfile):
    with open(propfile) as f:
        props = json.load(f)
    if set(props.keys()) != set(self.properties.keys()):
        prop_nomatch = True
    else:
        for k, v in props.items():
            if np.size(v) == 2:
                # Parentheses added: the original tested
                # ``not a == b and c == d``, which never compared
                # the uncertainties as intended.
                if not (self.properties[k][0] == v[0] and
                        self.properties[k][1] == v[1]):
                    prop_nomatch = True  # was ``props_nomatch`` (typo)
            else:
                if not self.properties[k] == v:
                    prop_nomatch = True  # was ``props_nomatch`` (typo)

if prop_nomatch and not overwrite:
    raise ValueError('Properties not same as saved chains '
                     '(basename {}*). '.format(basename) +
                     'Use overwrite=True to fit.')

if refit or overwrite:
    files = glob.glob('{}*'.format(basename))
    [os.remove(f) for f in files]

self._mnest_basename = basename

pymultinest.run(self.mnest_loglike, self.mnest_prior, self.n_params,
                n_live_points=n_live_points, outputfiles_basename=basename,
                verbose=verbose, **kwargs)

with open(propfile, 'w') as f:
    json.dump(self.properties, f, indent=2)

self._make_samples()
def fit_multinest(self, n_live_points=1000, basename='chains/single-', verbose=True, refit=False, overwrite=False, **kwargs)
Fits model using MultiNest, via pymultinest.

:param n_live_points:
    Number of live points to use for MultiNest fit.

:param basename:
    Where the MultiNest-generated files will live.  By default this
    will be in a folder named ``chains`` in the current working
    directory.  Calling this will define a ``_mnest_basename`` attribute
    for this object.

:param verbose:
    Whether you want MultiNest to talk to you.

:param refit, overwrite:
    Set either of these to true if you want to delete the MultiNest
    files associated with the given basename and start over.

:param **kwargs:
    Additional keyword arguments will be passed to :func:`pymultinest.run`.
2.777413
2.776487
1.000334
if self.fit_for_distance:
    fig1 = self.triangle(plot_datapoints=False,
                         params=['mass', 'radius', 'Teff', 'logg',
                                 'feh', 'age', 'distance', 'AV'],
                         **kwargs)
else:
    fig1 = self.triangle(plot_datapoints=False,
                         params=['mass', 'radius', 'Teff', 'feh', 'age'],
                         **kwargs)
if basename is not None:
    plt.savefig('{}_physical.{}'.format(basename, format))
    plt.close()
fig2 = self.prop_triangle(**kwargs)
if basename is not None:
    plt.savefig('{}_observed.{}'.format(basename, format))
    plt.close()
return fig1, fig2
def triangle_plots(self, basename=None, format='png', **kwargs)
Returns two triangle plots, one with physical params, one observational.

:param basename:
    If basename is provided, then plots will be saved as
    "[basename]_physical.[format]" and "[basename]_observed.[format]".

:param format:
    Format in which to save figures (e.g., 'png' or 'pdf').

:param **kwargs:
    Additional keyword arguments passed to :func:`StarModel.triangle`
    and :func:`StarModel.prop_triangle`.

:return:
    * Physical parameters triangle plot (mass, radius, Teff, feh, age, distance)
    * Observed properties triangle plot.
3.333576
2.351965
1.417358
if triangle is None:
    raise ImportError('please run "pip install triangle_plot".')

if params is None:
    if self.fit_for_distance:
        params = ['mass', 'age', 'feh', 'distance', 'AV']
    else:
        params = ['mass', 'age', 'feh']

df = self.samples

if query is not None:
    df = df.query(query)

# convert extent to ranges, but making sure
# that truths are in range.
extents = []
remove = []
for i, par in enumerate(params):
    m = re.search('delta_(\w+)$', par)
    if m:
        if type(self) == BinaryStarModel:
            b = m.group(1)
            values = (df['{}_mag_B'.format(b)] - df['{}_mag_A'.format(b)])
            df[par] = values
        else:
            remove.append(i)
            continue
    else:
        values = df[par]
    qs = np.array([0.5 - 0.5 * extent, 0.5 + 0.5 * extent])
    minval, maxval = values.quantile(qs)
    if 'truths' in kwargs:
        datarange = maxval - minval
        if kwargs['truths'][i] < minval:
            minval = kwargs['truths'][i] - 0.05 * datarange
        if kwargs['truths'][i] > maxval:
            maxval = kwargs['truths'][i] + 0.05 * datarange
    extents.append((minval, maxval))

[params.pop(i) for i in remove]

fig = triangle.corner(df[params], labels=params, extents=extents, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig
def triangle(self, params=None, query=None, extent=0.999, **kwargs)
Makes a nifty corner plot.  Uses :func:`triangle.corner`.

:param params: (optional)
    Names of columns (from :attr:`StarModel.samples`) to plot.
    If ``None``, then it will plot samples of the parameters used
    in the MCMC fit-- that is, mass, age, [Fe/H], and optionally
    distance and A_V.

:param query: (optional)
    Optional query on samples.

:param extent: (optional)
    Will be appropriately passed to :func:`triangle.corner`.

:param **kwargs:
    Additional keyword arguments passed to :func:`triangle.corner`.

:return:
    Figure object containing corner plot.
3.398483
3.217214
1.056343
truths = []
params = []
for p in self.properties:
    try:
        val, err = self.properties[p]
    except:
        continue
    if p in self.ic.bands:
        params.append('{}_mag'.format(p))
        truths.append(val)
    elif p == 'parallax':
        params.append('distance')
        truths.append(1 / (val / 1000.))
    else:
        params.append(p)
        truths.append(val)
return self.triangle(params, truths=truths, **kwargs)
def prop_triangle(self, **kwargs)
Makes corner plot of only observable properties.

The idea here is to compare the predictions of the samples with
the actual observed data---this can be a quick way to check if
there are outlier properties that aren't predicted well by the model.

:param **kwargs:
    Keyword arguments passed to :func:`StarModel.triangle`.

:return:
    Figure object containing corner plot.
3.670277
3.579846
1.025261
samples = self.samples[prop].values

if return_values:
    sorted_samples = np.sort(samples)  # renamed from ``sorted`` to avoid shadowing the builtin
    med = np.median(samples)
    n = len(samples)
    lo_ind = int(n * (0.5 - conf / 2))
    hi_ind = int(n * (0.5 + conf / 2))
    lo = med - sorted_samples[lo_ind]
    hi = sorted_samples[hi_ind] - med
    return samples, (med, lo, hi)
else:
    return samples
def prop_samples(self,prop,return_values=True,conf=0.683)
Returns samples of given property, based on MCMC sampling.

:param prop:
    Name of desired property.  Must be column of ``self.samples``.

:param return_values: (optional)
    If ``True`` (default), then also return (median, lo_err, hi_err)
    corresponding to desired credible interval.

:param conf: (optional)
    Desired quantile for credible interval.  Default = 0.683.

:return:
    :class:`np.ndarray` of desired samples.

:return:
    Optionally also return summary statistics (median, lo_err, hi_err),
    if ``return_values == True`` (this is default behavior).
2.641194
2.509506
1.052476
setfig(fig)
samples, stats = self.prop_samples(prop)
fig = plt.hist(samples, bins=bins, normed=True,
               histtype=histtype, lw=lw, **kwargs)
plt.xlabel(prop)
plt.ylabel('Normalized count')

if label:
    med, lo, hi = stats
    plt.annotate('$%.2f^{+%.2f}_{-%.2f}$' % (med, hi, lo),
                 xy=(0.7, 0.8), xycoords='axes fraction', fontsize=20)

return fig
def plot_samples(self,prop,fig=None,label=True, histtype='step',bins=50,lw=3, **kwargs)
Plots histogram of samples of desired property.

:param prop:
    Desired property (must be legit column of samples).

:param fig:
    Argument for :func:`plotutils.setfig` (``None`` or int).

:param histtype, bins, lw:
    Passed to :func:`plt.hist`.

:param **kwargs:
    Additional keyword arguments passed to ``plt.hist``.

:return:
    Figure object.
3.149507
3.352245
0.939522
if os.path.exists(filename):
    store = pd.HDFStore(filename)
    if path in store:
        store.close()
        if overwrite:
            os.remove(filename)
        elif not append:
            raise IOError('{} in {} exists.  Set either overwrite or append option.'.format(path, filename))
    else:
        store.close()

self.samples.to_hdf(filename, '{}/samples'.format(path))

store = pd.HDFStore(filename)
attrs = store.get_storer('{}/samples'.format(path)).attrs
attrs.properties = self.properties
attrs.ic_type = type(self.ic)
attrs.maxAV = self.maxAV
attrs.max_distance = self.max_distance
attrs.min_logg = self.min_logg
attrs.use_emcee = self.use_emcee
attrs._mnest_basename = self._mnest_basename
attrs.name = self.name
store.close()
def save_hdf(self, filename, path='', overwrite=False, append=False)
Saves object data to HDF file (only works if MCMC is run).

Samples are saved to /samples location under given path, and
object properties are also attached, so suitable for re-loading
via :func:`StarModel.load_hdf`.

:param filename:
    Name of file to save to.  Should be .h5 file.

:param path: (optional)
    Path within HDF file structure to save to.

:param overwrite: (optional)
    If ``True``, delete any existing file by the same name
    before writing.

:param append: (optional)
    If ``True``, then if a file exists, then just the path
    within the file will be updated.
3.491865
3.244973
1.076085
store = pd.HDFStore(filename)
try:
    samples = store['{}/samples'.format(path)]
    attrs = store.get_storer('{}/samples'.format(path)).attrs
except:
    store.close()
    raise

properties = attrs.properties
maxAV = attrs.maxAV
max_distance = attrs.max_distance
min_logg = attrs.min_logg
ic_type = attrs.ic_type
use_emcee = attrs.use_emcee
basename = attrs._mnest_basename

if name is None:
    try:
        name = attrs.name
    except:
        name = ''

store.close()

# ic = ic_type()  # don't need to initialize anymore

mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance,
          use_emcee=use_emcee, name=name, **properties)
mod._samples = samples
mod._mnest_basename = basename
return mod
def load_hdf(cls, filename, path='', name=None)
A class method to load a saved StarModel from an HDF5 file.

File must have been created by a call to :func:`StarModel.save_hdf`.

:param filename:
    H5 file to load.

:param path: (optional)
    Path within HDF file.

:return:
    :class:`StarModel` object.
4.324292
4.398523
0.983124
mA_0, age0, feh0 = self.ic.random_points(nseeds)
mB_0, foo1, foo2 = self.ic.random_points(nseeds)
mA_fixed = np.maximum(mA_0, mB_0)
mB_fixed = np.minimum(mA_0, mB_0)
mA_0, mB_0 = (mA_fixed, mB_fixed)

d0 = 10**(rand.uniform(0, np.log10(self.max_distance), size=nseeds))
AV0 = rand.uniform(0, self.maxAV, size=nseeds)

costs = np.zeros(nseeds)

if self.fit_for_distance:
    pfits = np.zeros((nseeds, 6))
else:
    pfits = np.zeros((nseeds, 4))

def fn(p):  # fmin is a function *minimizer*
    return -1 * self.lnpost(p)

for i, mA, mB, age, feh, d, AV in zip(range(nseeds),
                                      mA_0, mB_0, age0, feh0, d0, AV0):
    if self.fit_for_distance:
        pfit = scipy.optimize.fmin(fn, [mA, mB, age, feh, d, AV], disp=False)
    else:
        pfit = scipy.optimize.fmin(fn, [mA, mB, age, feh], disp=False)
    pfits[i, :] = pfit
    costs[i] = self.lnpost(pfit)

return pfits[np.argmax(costs), :]
def maxlike(self,nseeds=50)
Returns the best-fit parameters, choosing the best of multiple starting guesses.

:param nseeds: (optional)
    Number of starting guesses, uniformly distributed throughout
    allowed ranges.  Default=50.

:return:
    List of best-fit parameters: ``[mA, mB, age, feh, [distance, A_V]]``.
    Note that distance and A_V values will be meaningless unless
    magnitudes are present in ``self.properties``.
3.157655
2.878161
1.097108
if params is None:
    params = ['mass_A', 'mass_B', 'age', 'feh', 'distance', 'AV']
# ``return`` added so the figure produced by the parent class is passed along
return super(BinaryStarModel, self).triangle(params=params, **kwargs)
def triangle(self, params=None, **kwargs)
Makes a nifty corner plot.  Uses :func:`triangle.corner`.

:param params: (optional)
    Names of columns (from :attr:`StarModel.samples`) to plot.
    If ``None``, then it will plot samples of the parameters used
    in the MCMC fit-- that is, mass, age, [Fe/H], and optionally
    distance and A_V.

:param query: (optional)
    Optional query on samples.

:param extent: (optional)
    Will be appropriately passed to :func:`triangle.corner`.

:param **kwargs:
    Additional keyword arguments passed to :func:`triangle.corner`.

:return:
    Figure object containing corner plot.
6.832888
5.344519
1.278485
mA_0, age0, feh0 = self.ic.random_points(nseeds)
mB_0, foo1, foo2 = self.ic.random_points(nseeds)
mC_0, foo3, foo4 = self.ic.random_points(nseeds)
m_all = np.sort(np.array([mA_0, mB_0, mC_0]), axis=0)
mA_0, mB_0, mC_0 = (m_all[0, :], m_all[1, :], m_all[2, :])

d0 = 10**(rand.uniform(0, np.log10(self.max_distance), size=nseeds))
AV0 = rand.uniform(0, self.maxAV, size=nseeds)

costs = np.zeros(nseeds)

if self.fit_for_distance:
    pfits = np.zeros((nseeds, 7))
else:
    pfits = np.zeros((nseeds, 5))

def fn(p):  # fmin is a function *minimizer*
    return -1 * self.lnpost(p)

for i, mA, mB, mC, age, feh, d, AV in zip(range(nseeds),
                                          mA_0, mB_0, mC_0,
                                          age0, feh0, d0, AV0):
    if self.fit_for_distance:
        pfit = scipy.optimize.fmin(fn, [mA, mB, mC, age, feh, d, AV],
                                   disp=False)
    else:
        pfit = scipy.optimize.fmin(fn, [mA, mB, mC, age, feh], disp=False)
    pfits[i, :] = pfit
    costs[i] = self.lnpost(pfit)

return pfits[np.argmax(costs), :]
def maxlike(self,nseeds=50)
Returns the best-fit parameters, choosing the best of multiple starting guesses.

:param nseeds: (optional)
    Number of starting guesses, uniformly distributed throughout
    allowed ranges.  Default=50.

:return:
    List of best-fit parameters: ``[mA, mB, mC, age, feh, [distance, A_V]]``.
    Note that distance and A_V values will be meaningless unless
    magnitudes are present in ``self.properties``.
2.809123
2.644964
1.062065
if params is None:
    params = ['mass_A', 'mass_B', 'mass_C', 'age', 'feh', 'distance', 'AV']
# ``return`` added so the figure produced by the parent class is passed along
return super(TripleStarModel, self).triangle(params=params, **kwargs)
def triangle(self, params=None, **kwargs)
Makes a nifty corner plot.
5.94734
5.480618
1.085159
phot = None

# Default to SDSS for these
if b in ['u', 'g', 'r', 'i', 'z']:
    phot = 'SDSSugriz'
    band = 'sdss_{}'.format(b)
elif b in ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'Ks']:
    phot = 'UBVRIJHKsKp'
    band = b
elif b == 'K':
    phot = 'UBVRIJHKsKp'
    band = 'Ks'
elif b in ['kep', 'Kepler', 'Kp']:
    phot = 'UBVRIJHKsKp'
    band = 'Kp'
elif b in ['W1', 'W2', 'W3', 'W4']:
    phot = 'WISE'
    band = b
elif re.match('uvf', b) or re.match('irf', b):
    phot = 'HST_WFC3'
    band = b
else:
    m = re.match('([a-zA-Z]+)_([a-zA-Z_]+)', b)
    if m:
        if m.group(1) in cls.phot_systems:
            phot = m.group(1)
            if phot == 'LSST':
                band = b
            else:
                band = m.group(2)
        elif m.group(1) in ['UK', 'UKIRT']:
            phot = 'UKIDSS'
            band = m.group(2)

if phot is None:
    raise ValueError('Dartmouth Models cannot resolve band {}!'.format(b))
return phot, band
def get_band(cls, b, **kwargs)
Defines what a "shortcut" band name refers to.
2.989383
2.954572
1.011782
L = 0
R = N - 1
done = False
m = (L + R) // 2
while not done:
    if arr[m] < x:
        L = m + 1
    elif arr[m] > x:
        R = m - 1
    elif arr[m] == x:
        done = True
    m = (L + R) // 2
    if L > R:
        done = True
return L
def searchsorted(arr, N, x)
N is length of arr
1.887997
1.91397
0.986429
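For values absent from the array, the routine above returns the left insertion index, matching numpy; a quick check (note that when x is present, it returns the current left bound, which can differ from numpy's convention):

import numpy as np

arr = [1, 3, 5, 8]
for x in (0, 2, 4, 9):
    assert searchsorted(arr, len(arr), x) == np.searchsorted(arr, x)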
coords = SkyCoord(ra, dec, unit='deg', frame=frame).transform_to('icrs')

rah, ram, ras = coords.ra.hms
decd, decm, decs = coords.dec.dms
if decd > 0:
    decsign = '%2B'
else:
    decsign = '%2D'

url = ('http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon=' +
       '%i' % rah + '%3A' + '%i' % ram + '%3A' + '%05.2f' % ras +
       '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) +
       '%3A' + '%05.2f' % abs(decs) +
       '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0')

AV = None
for line in urllib.request.urlopen(url).readlines():
    m = re.search(b'^Landolt V \(0.54\)\s+(\d+\.\d+)', line)
    if m:
        AV = (float(m.group(1)))
        break

if AV is None:
    raise RuntimeError('AV query fails!  URL is {}'.format(url))

return AV
def get_AV_infinity(ra,dec,frame='icrs')
Gets the A_V extinction at infinity for a given line of sight.

Queries the NED database.

:param ra, dec:
    Desired coordinates, in degrees.

:param frame: (optional)
    Frame of input coordinates (e.g., ``'icrs', 'galactic'``).
2.777463
2.834613
0.979838
# The following fragment of code is copied from flask_restful project
@functools.wraps(fun)
def wrapper(*args: Any, **kwargs: Any) -> Any:
    resp = fun(*args, **kwargs)
    if isinstance(resp, Response):  # There may be a better way to test
        return resp
    data, code, headers = flask_response_unpack(resp)
    return flask_restful_api.make_response(data, code, headers=headers)
# end of flask_restful code

return wrapper
def wrap_flask_restful_resource( fun: Callable, flask_restful_api: FlaskRestfulApi, injector: Injector ) -> Callable
This wrapper is needed because of how flask_restful views are originally registered. :type flask_restful_api: :class:`flask_restful.Api`
3.666291
3.760152
0.975038
if self.base_url: return urlparse.urljoin(self.base_url, url) else: return url
def complete_url(self, url)
Completes a given URL with this instance's URL base.
2.779793
2.44037
1.139087
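The behavior follows directly from ``urlparse.urljoin``, shown here with an assumed ``base_url``:

# With self.base_url = 'http://example.com/app/':
self.complete_url('page')              # -> 'http://example.com/app/page'
self.complete_url('/other')            # -> 'http://example.com/other'
self.complete_url('http://elsewhere')  # absolute URLs pass through unchanged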
import code code.interact(local=dict(sess=self, **local))
def interact(self, **local)
Drops the user into an interactive Python session with the ``sess`` variable set to the current session instance. If keyword arguments are supplied, these names will also be available within the session.
11.448528
5.370917
2.131578
start = time.time() # always evaluate the condition at least once! while True: res = condition() if res: return res # timed out? if time.time() - start > timeout: break # wait a bit before the next check time.sleep(interval) # timeout occurred! raise WaitTimeoutError("wait_for timed out")
def wait_for(self, condition, interval = DEFAULT_WAIT_INTERVAL, timeout = DEFAULT_WAIT_TIMEOUT)
Wait until a condition holds by checking it in regular intervals. Raises ``WaitTimeoutError`` on timeout.
4.494526
4.079686
1.101684
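A usage sketch, assuming a session object ``sess`` that mixes in ``wait_for`` and exposes a ``body()`` method (the condition itself is illustrative):

# Poll every 0.5 s until the page body contains 'Done', for at most 10 s;
# raises WaitTimeoutError if the condition never becomes true.
sess.wait_for(lambda: 'Done' in sess.body(), interval=0.5, timeout=10)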
return self.wait_for(lambda: not condition(), *args, **kw)
def wait_while(self, condition, *args, **kw)
Wait while a condition holds; returns once the condition no longer holds. Raises ``WaitTimeoutError`` on timeout.
6.453421
5.48377
1.176822
return self.wait_for_safe(lambda: super(WaitMixin, self).at_css(css), timeout = timeout, **kw)
def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw)
Returns the first node matching the given CSSv3 expression or ``None`` if a timeout occurs.
6.987713
8.242093
0.847808
return self.wait_for_safe(lambda: super(WaitMixin, self).at_xpath(xpath), timeout = timeout, **kw)
def at_xpath(self, xpath, timeout = DEFAULT_AT_TIMEOUT, **kw)
Returns the first node matching the given XPath 2.0 expression or ``None`` if a timeout occurs.
6.852977
8.003282
0.856271
''' Switch the axis limits of either x or y. Or both! ''' for a in which_axis: assert a in ('x', 'y') ax_limits = ax.axis() if a == 'x': ax.set_xlim(ax_limits[1], ax_limits[0]) else: ax.set_ylim(ax_limits[3], ax_limits[2])
def switch_axis_limits(ax, which_axis)
Switch the axis limits of either x or y. Or both!
2.797015
2.139095
1.307569
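A minimal example:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
switch_axis_limits(ax, 'y')    # flip the y-axis only
switch_axis_limits(ax, 'xy')   # flip both axes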
''' Removes "chartjunk", such as extra lines of axes and tick marks. If grid="y" or "x", will add a white grid at the "y" or "x" axes, respectively If ticklabels="y" or "x", or ['x', 'y'] will remove ticklabels from that axis ''' all_spines = ['top', 'bottom', 'right', 'left', 'polar'] for spine in spines: # The try/except is for polar coordinates, which only have a 'polar' # spine and none of the others try: ax.spines[spine].set_visible(False) except KeyError: pass # For the remaining spines, make their line thinner and a slightly # off-black dark grey if not xkcd: for spine in set(all_spines).difference(set(spines)): # The try/except is for polar coordinates, which only have a # 'polar' spine and none of the others try: ax.spines[spine].set_linewidth(0.5) except KeyError: pass # Check that the axes are not log-scale. If they are, leave # the ticks because otherwise people assume a linear scale. x_pos = set(['top', 'bottom']) y_pos = set(['left', 'right']) xy_pos = [x_pos, y_pos] xy_ax_names = ['xaxis', 'yaxis'] for ax_name, pos in zip(xy_ax_names, xy_pos): axis = ax.__dict__[ax_name] if show_ticks or axis.get_scale() == 'log': # if this spine is not in the list of spines to remove for p in pos.difference(spines): axis.set_tick_params(direction='out') axis.set_ticks_position(p) else: axis.set_ticks_position('none') if grid is not None: for g in grid: assert g in ('x', 'y') # draw the grid for this axis (not the whole `grid` argument) ax.grid(axis=g, color='white', linestyle='-', linewidth=0.5) if ticklabels is not None: if isinstance(ticklabels, str): assert ticklabels in ('x', 'y') if ticklabels == 'x': ax.set_xticklabels([]) if ticklabels == 'y': ax.set_yticklabels([]) else: assert len(set(ticklabels) & set(('x', 'y'))) > 0 # use independent ifs so ['x', 'y'] removes labels on both axes if 'x' in ticklabels: ax.set_xticklabels([]) if 'y' in ticklabels: ax.set_yticklabels([])
def remove_chartjunk(ax, spines, grid=None, ticklabels=None, show_ticks=False, xkcd=False)
Removes "chartjunk", such as extra lines of axes and tick marks. If grid="y" or "x", will add a white grid at the "y" or "x" axes, respectively If ticklabels="y" or "x", or ['x', 'y'] will remove ticklabels from that axis
2.97161
2.500572
1.188372
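A minimal example of the usual call:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.bar(range(5), [3, 1, 4, 1, 5])
# Drop the top/right spines and overlay a white grid on the y axis
remove_chartjunk(ax, ['top', 'right'], grid='y')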
if 'ax' in kwargs: ax = kwargs.pop('ax') elif len(args) == 0: fig = plt.gcf() ax = plt.gca() elif isinstance(args[0], mpl.axes.Axes): ax = args[0] args = args[1:] else: ax = plt.gca() return ax, args, dict(kwargs)
def maybe_get_ax(*args, **kwargs)
It used to be that the first argument of prettyplotlib had to be the 'ax' object, but that's not the case anymore. @param args: positional arguments, optionally beginning with a matplotlib Axes instance @param kwargs: keyword arguments, optionally containing 'ax' @return: the resolved Axes, the remaining positional arguments, and the remaining keyword arguments @rtype: (matplotlib.axes.Axes, tuple, dict)
2.033947
2.138034
0.951316
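The helper accepts any of these call styles (a small self-contained sketch):

import numpy as np
import matplotlib.pyplot as plt

x, y = np.arange(10), np.arange(10) ** 2
fig, my_ax = plt.subplots()
ax, args, kwargs = maybe_get_ax(x, y)             # falls back to plt.gca()
ax, args, kwargs = maybe_get_ax(my_ax, x, y)      # Axes stripped from args
ax, args, kwargs = maybe_get_ax(x, y, ax=my_ax)   # keyword form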
if 'ax' in kwargs: ax = kwargs.pop('ax') if 'fig' in kwargs: fig = kwargs.pop('fig') else: fig = plt.gcf() elif len(args) == 0: fig = plt.gcf() ax = plt.gca() elif isinstance(args[0], mpl.figure.Figure) and \ isinstance(args[1], mpl.axes.Axes): fig = args[0] ax = args[1] args = args[2:] else: fig, ax = plt.subplots(1) return fig, ax, args, dict(kwargs)
def maybe_get_fig_ax(*args, **kwargs)
It used to be that the first argument of prettyplotlib had to be the 'ax' object, but that's not the case anymore. This is specially made for pcolormesh. @param args: positional arguments, optionally beginning with a matplotlib Figure and Axes @param kwargs: keyword arguments, optionally containing 'fig' and/or 'ax' @return: the resolved Figure and Axes, the remaining positional arguments, and the remaining keyword arguments @rtype: (matplotlib.figure.Figure, matplotlib.axes.Axes, tuple, dict)
1.892809
1.975018
0.958375
ax, args, kwargs = utils.maybe_get_ax(*args, **kwargs) if 'color' not in kwargs: # Assume 'color' means the edge color, so the middle of the scatter # patches stays empty; take the next color from the cycle. color_cycle = ax._get_lines.color_cycle kwargs['color'] = next(color_cycle) kwargs.setdefault('edgecolor', almost_black) kwargs.setdefault('alpha', 0.5) lw = utils.maybe_get_linewidth(**kwargs) kwargs['lw'] = lw show_ticks = kwargs.pop('show_ticks', False) scatterpoints = ax.scatter(*args, **kwargs) utils.remove_chartjunk(ax, ['top', 'right'], show_ticks=show_ticks) return scatterpoints
def scatter(*args, **kwargs)
This will plot a scatterplot of x and y, iterating over the ColorBrewer "Set2" color cycle unless a color is specified. The symbols produced are empty circles, with the outline in the color specified by either 'color' or 'edgecolor'. If you want to fill the circle, specify 'facecolor'. Besides the arguments accepted by matplotlib's scatter(), also takes: @param show_ticks: Whether or not to show the x and y axis ticks
5.721254
5.503458
1.039574
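Typical usage (assuming the package is imported as ``ppl``, as in prettyplotlib's own docs):

import numpy as np
import prettyplotlib as ppl

x, y = np.random.randn(2, 100)
ppl.scatter(x, y)                          # empty circles, Set2 cycle
ppl.scatter(x, y, facecolor='lightgrey')   # filled circles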
ax, args, kwargs = maybe_get_ax(*args, **kwargs) # If no ticklabels are specified, don't draw any xticklabels = kwargs.pop('xticklabels', None) fontsize = kwargs.pop('fontsize', 10) # Pop the prettyplotlib-specific kwargs *before* calling boxplot, since # matplotlib would raise a TypeError on unexpected keyword arguments. show_caps = kwargs.pop('show_caps', True) show_ticks = kwargs.pop('show_ticks', False) kwargs.setdefault('widths', 0.15) bp = ax.boxplot(*args, **kwargs) if xticklabels: ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize) remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks) linewidth = 0.75 blue = colors.set1[1] red = colors.set1[0] plt.setp(bp['boxes'], color=blue, linewidth=linewidth) plt.setp(bp['medians'], color=red) plt.setp(bp['whiskers'], color=blue, linestyle='solid', linewidth=linewidth) plt.setp(bp['fliers'], color=blue) if show_caps: plt.setp(bp['caps'], color=blue, linewidth=linewidth) else: plt.setp(bp['caps'], color='none') ax.spines['left']._linewidth = 0.5 return bp
def boxplot(*args, **kwargs)
Create a box-and-whisker plot showing the median, 25th percentile, and 75th percentile. The difference from matplotlib is only the left axis line is shown, and ticklabels labeling each category of data can be added. @param kwargs: Besides xticklabels, which is a prettyplotlib-specific argument that labels each individual boxplot, any argument for matplotlib.pyplot.boxplot will be accepted: http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot @return: the dictionary of artists returned by matplotlib's boxplot
2.7119
2.864471
0.946737
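Typical usage, labeling each box with the prettyplotlib-specific ``xticklabels`` argument:

import numpy as np
import prettyplotlib as ppl

data = [np.random.randn(100), np.random.randn(100) + 1]
ppl.boxplot(data, xticklabels=['control', 'treated'])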
ax, args, kwargs = maybe_get_ax(*args, **kwargs) color_cycle = ax._get_lines.color_cycle # Reassign the default colors to Set2 by Colorbrewer if iterable(args[0]): if isinstance(args[0], list): ncolors = len(args[0]) else: if len(args[0].shape) == 2: ncolors = args[0].shape[1] else: ncolors = 1 kwargs.setdefault('color', [next(color_cycle) for _ in range(ncolors)]) else: kwargs.setdefault('color', next(color_cycle)) kwargs.setdefault('edgecolor', 'white') show_ticks = kwargs.pop('show_ticks', False) # If no grid specified, don't draw one. grid = kwargs.pop('grid', None) patches = ax.hist(*args, **kwargs) remove_chartjunk(ax, ['top', 'right'], grid=grid, show_ticks=show_ticks) return patches
def hist(*args, **kwargs)
Plots a histogram of the provided data. Can provide optional argument "grid='x'" or "grid='y'" to draw a white grid over the histogram. Almost like "erasing" some of the plot, but it adds more information!
3.535964
3.504557
1.008962
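Typical usage with the optional white grid:

import numpy as np
import prettyplotlib as ppl

ppl.hist(np.random.randn(1000), grid='y')   # white y-grid over the bars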
ax, args, kwargs = maybe_get_ax(*args, **kwargs) # If no ticklabels are specified, don't draw any xticklabels = kwargs.pop('xticklabels', None) colors = kwargs.pop('colors', None) fontsize = kwargs.pop('fontsize', 10) gray = _colors.set1[8] blue = kwargs.pop('color', _colors.set1[1]) kwargs.setdefault('widths', 0.25) kwargs.setdefault('sym', "o") # Pop the beeswarm-specific styling kwargs *before* calling _beeswarm, # so they are not forwarded as unexpected keyword arguments. median_color = kwargs.pop('median_color', gray) median_linewidth = kwargs.pop('median_linewidth', 2) show_caps = kwargs.pop('show_caps', True) # accepted but currently unused show_ticks = kwargs.pop('show_ticks', False) bp = _beeswarm(ax, *args, **kwargs) if xticklabels: ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize) remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks) linewidth = 0.75 plt.setp(bp['boxes'], color=blue, linewidth=linewidth) plt.setp(bp['medians'], color=median_color, linewidth=median_linewidth) # Only recolor the fliers if per-series colors were given; zip() over # the default None would raise a TypeError. if colors is not None: for color, flier in zip(colors, bp['fliers']): plt.setp(flier, color=color) ax.spines['left']._linewidth = 0.5 return bp
def beeswarm(*args, **kwargs)
Create an R-like beeswarm plot showing the median and datapoints. The difference from matplotlib is only the left axis line is shown, and ticklabels labeling each category of data can be added. @param kwargs: Besides xticklabels, which is a prettyplotlib-specific argument that labels each individual beeswarm, many arguments for matplotlib.pyplot.boxplot will be accepted: http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot Additional arguments include: *median_color* : (default gray) The color of median lines *median_linewidth* : (default 2) Median line width *colors* : (default None) Colors to use when painting a dataseries, for example list1 = [1,2,3] list2 = [5,6,7] ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"]) @return: the dictionary of artists returned by the underlying beeswarm routine
3.027292
2.973643
1.018042
LOG.debug(kwargs) if query: query = jmespath.compile(query) if self._client.can_paginate(op_name): paginator = self._client.get_paginator(op_name) results = paginator.paginate(**kwargs) data = results.build_full_result() else: op = getattr(self._client, op_name) done = False data = {} while not done: try: data = op(**kwargs) done = True except ClientError as e: LOG.debug(e, kwargs) if 'Throttling' in str(e): # back off briefly and retry time.sleep(1) elif 'AccessDenied' in str(e): done = True elif 'NoSuchTagSet' in str(e): done = True except Exception: # give up on anything unexpected rather than loop forever done = True if query: data = query.search(data) return data
def call(self, op_name, query=None, **kwargs)
Make a request to a method in this client. The response data is returned from this call as native Python data structures. This method differs from just calling the client method directly in the following ways: * It automatically handles the pagination rather than relying on a separate pagination method call. * You can pass an optional jmespath query and this query will be applied to the data returned from the low-level call. This allows you to tailor the returned data to be exactly what you want. :type op_name: str :param op_name: The name of the request you wish to make. :type query: str :param query: A jmespath query that will be applied to the data returned by the operation prior to returning it to the user. :type kwargs: keyword arguments :param kwargs: Additional keyword arguments you want to pass to the method when making the request.
2.244423
2.225296
1.008596
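A usage sketch: one paginated request plus a jmespath query (``ec2`` is assumed to be an already-constructed wrapper exposing this ``call`` method):

# build_full_result() handles the pagination; the query trims the output
instance_ids = ec2.call(
    'describe_instances',
    query='Reservations[].Instances[].InstanceId')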
class_path = ResourceTypes[resource_path] # First prepend our __name__ to the resource string passed in. full_path = '.'.join([__name__, class_path]) class_data = full_path.split(".") module_path = ".".join(class_data[:-1]) class_str = class_data[-1] module = importlib.import_module(module_path) # Finally, we retrieve the Class return getattr(module, class_str)
def find_resource_class(resource_path)
Dynamically load and return a class from its resource-path string.
4.012527
3.961632
1.012847
matches = [] regex = pattern if regex == '*': regex = '.*' regex = re.compile(regex) for choice in self.choices(context): if regex.search(choice): matches.append(choice) return matches
def match(self, pattern, context=None)
This method returns a (possibly empty) list of strings that match the regular expression ``pattern`` provided. You can also provide a ``context`` as described above. This method calls ``choices`` to get a list of all possible choices and then filters the list by performing a regular expression search on each choice using the supplied ``pattern``.
3.195688
2.755273
1.159844
if self._tags is None: LOG.debug('need to build tags') self._tags = {} if hasattr(self.Meta, 'tags_spec') and (self.Meta.tags_spec is not None): LOG.debug('have a tags_spec') method, path, param_name, param_value = self.Meta.tags_spec[:4] filter_type = getattr(self.Meta, 'filter_type', None) # 'arn' and 'list' filters both wrap the value in a list if filter_type in ('arn', 'list'): kwargs = {param_name: [getattr(self, param_value)]} else: kwargs = {param_name: getattr(self, param_value)} if len(self.Meta.tags_spec) > 4: kwargs.update(self.Meta.tags_spec[4]) LOG.debug('fetching tags') self.data['Tags'] = self._client.call( method, query=path, **kwargs) LOG.debug(self.data['Tags']) if 'Tags' in self.data: _tags = self.data['Tags'] if isinstance(_tags, list): # list of {'Key': ..., 'Value': ...} pairs; collect repeated # keys into a list of values for kvpair in _tags: if kvpair['Key'] in self._tags: if not isinstance(self._tags[kvpair['Key']], list): self._tags[kvpair['Key']] = [self._tags[kvpair['Key']]] self._tags[kvpair['Key']].append(kvpair['Value']) else: self._tags[kvpair['Key']] = kvpair['Value'] elif isinstance(_tags, dict): self._tags = _tags return self._tags
def tags(self)
Convert the ugly Tags JSON into a real dictionary and memoize the result.
2.296828
2.264992
1.014056
if not statistics: statistics = ['Average'] if days: delta = datetime.timedelta(days=days) elif hours: delta = datetime.timedelta(hours=hours) else: delta = datetime.timedelta(minutes=minutes) if not period: period = max(60, self._total_seconds(delta) // 1440) if not metric: metric = self.find_metric(metric_name) if metric and self._cloudwatch: end = datetime.datetime.utcnow() start = end - delta data = self._cloudwatch.call( 'get_metric_statistics', Dimensions=metric['Dimensions'], Namespace=metric['Namespace'], MetricName=metric['MetricName'], StartTime=start.isoformat(), EndTime=end.isoformat(), Statistics=statistics, Period=period) return MetricData(jmespath.search('Datapoints', data), period) else: raise ValueError('Metric (%s) not available' % metric_name)
def get_metric_data(self, metric_name=None, metric=None, days=None, hours=1, minutes=None, statistics=None, period=None)
Get metric data for this resource. You can specify the time frame for the data as either the number of days or number of hours. The maximum window is 14 days. Based on the time frame this method will calculate the correct ``period`` to return the maximum number of data points up to the CloudWatch max of 1440. :type metric_name: str :param metric_name: The name of the metric this data will pertain to. :type days: int :param days: The number of days worth of data to return. You can specify either ``days`` or ``hours``. The default is one hour. The maximum value is 14 days. :type hours: int :param hours: The number of hours worth of data to return. You can specify either ``days`` or ``hours``. The default is one hour. The maximum value is 14 days. :type statistics: list of str :param statistics: The metric statistics to return. The default value is **Average**. Possible values are: * Average * Sum * SampleCount * Maximum * Minimum :returns: A ``MetricData`` object that contains both the CloudWatch data as well as the ``period`` used since this value may have been calculated by skew.
2.484656
2.471579
1.005291
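A usage sketch (``instance`` is assumed to be a resource object exposing this method):

# One day of CPU data; the period is derived automatically so that at
# most 1440 datapoints come back.
data = instance.get_metric_data('CPUUtilization', days=1,
                                statistics=['Average', 'Maximum'])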
distributions = [] for name in dir(scipy.stats): # keep only the scipy.stats objects that expose a fit() method, # i.e. the distributions that can actually be fitted if "fit" in dir(getattr(scipy.stats, name)): distributions.append(name) self.distributions = distributions[:]
def load_all_distributions(self)
Replace the :attr:`distributions` attribute with all scipy distributions
6.608592
5.106747
1.29409
_ = pylab.hist(self._data, bins=self.bins, density=True) pylab.grid(True)
def hist(self)
Draw normed histogram of the data using :attr:`bins` .. plot:: >>> from scipy import stats >>> data = stats.gamma.rvs(2, loc=1.5, scale=2, size=20000) >>> # We then create the Fitter object >>> import fitter >>> fitter.Fitter(data).hist()
5.314194
5.98961
0.887235
for distribution in self.distributions: try: dist = eval("scipy.stats." + distribution) # dist.fit may take a while or hang forever for some # distributions, and the signal module cannot interrupt it # (fit catches exceptions internally), so a timed thread is # used instead -- a recipe from stackoverflow; see the # _timed_run helper. param = self._timed_run(dist.fit, distribution, args=self._data) # hoping the order returned by fit is the same as in pdf pdf_fitted = dist.pdf(self.x, *param) self.fitted_param[distribution] = param[:] self.fitted_pdf[distribution] = pdf_fitted # compute the error now sq_error = pylab.sum((self.fitted_pdf[distribution] - self.y)**2) if self.verbose: print("Fitted {} distribution with error={}".format(distribution, sq_error)) self._fitted_errors[distribution] = sq_error except Exception: if self.verbose: print("SKIPPED {} distribution (taking more than {} seconds)".format(distribution, self.timeout)) # if we cannot compute the error, set it to a large value # FIXME use inf self._fitted_errors[distribution] = 1e6 self.df_errors = pd.DataFrame({'sumsquare_error': self._fitted_errors})
def fit(self)
r"""Loop over distributions and find best parameter to fit the data for each When a distribution is fitted onto the data, we populate a set of dataframes: - :attr:`df_errors` :sum of the square errors between the data and the fitted distribution i.e., :math:`\sum_i \left( Y_i - pdf(X_i) \right)^2` - :attr:`fitted_param` : the parameters that best fit the data - :attr:`fitted_pdf` : the PDF generated with the parameters that best fit the data Indices of the dataframes contains the name of the distribution.
9.126401
8.242911
1.107182
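An end-to-end sketch of the Fitter workflow these methods belong to (restricting the candidate list keeps ``fit()`` fast):

from scipy import stats
from fitter import Fitter

data = stats.gamma.rvs(2, loc=1.5, scale=2, size=10000)
f = Fitter(data, distributions=['gamma', 'rayleigh', 'norm'])
f.fit()
print(f.get_best())   # e.g. {'gamma': (...)} -- see get_best() below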
assert Nbest > 0 if Nbest > len(self.distributions): Nbest = len(self.distributions) if isinstance(names, list): for name in names: pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name) elif names: pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names) else: try: names = self.df_errors.sort_values( by="sumsquare_error").index[0:Nbest] except AttributeError: # fall back to the pre-0.17 pandas API names = self.df_errors.sort("sumsquare_error").index[0:Nbest] for name in names: if name in self.fitted_pdf: pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name) else: print("%s was not fitted. no parameters available" % name) pylab.grid(True) pylab.legend()
def plot_pdf(self, names=None, Nbest=5, lw=2)
Plots Probability density functions of the distributions :param str,list names: names can be a single distribution name, or a list of distribution names, or kept as None, in which case the first Nbest distributions will be taken (defaults to the best 5)
2.421985
2.429026
0.997101
# self.df_errors should already be sorted, so we take the first entry # as the best fit name = self.df_errors.sort_values('sumsquare_error').iloc[0].name params = self.fitted_param[name] return {name: params}
def get_best(self)
Return the best fitted distribution and its parameters: a dictionary with one key (the distribution name) mapped to its parameters
12.950407
10.956831
1.181948