code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
# parse channel type from name,
# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'
name, ctype = _strip_ctype(name, ctype, connection.get_protocol())

# query NDS2
found = connection.find_channels(name, ctype, dtype, *sample_rate)

# if don't care about defaults, just return now
if not unique:
    return found

# if two results, remove 'online' copy (if present)
# (if no online channels present, this does nothing)
if len(found) == 2:
    found = [c for c in found
             if c.channel_type != Nds2ChannelType.ONLINE.value]

# if not unique result, panic
if len(found) != 1:
    raise ValueError("unique NDS2 channel match not found for %r" % name)

return found
def _find_channel(connection, name, ctype, dtype, sample_rate, unique=False)
Internal method to find a single channel Parameters ---------- connection : `nds2.connection`, optional open NDS2 connection to use for query name : `str` the name of the channel to find ctype : `int` the NDS2 channel type to match dtype : `int` the NDS2 data type to match sample_rate : `tuple` a pre-formatted rate tuple (see `find_channels`) unique : `bool`, optional, default: `False` require one (and only one) match per channel Returns ------- channels : `list` of `nds2.channel` list of NDS2 channel objects, if `unique=True` is given the list is guaranteed to have only one element. See also -------- nds2.connection.find_channels for documentation on the underlying query method
7.148093
6.914095
1.033844
# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')
try:
    name, ctypestr = name.rsplit(',', 1)
except ValueError:
    pass
else:
    ctype = Nds2ChannelType.find(ctypestr).value
    # NDS1 stores channels with trend suffix, so we put it back:
    if protocol == 1 and ctype in (
            Nds2ChannelType.STREND.value,
            Nds2ChannelType.MTREND.value,
    ):
        name += ',{0}'.format(ctypestr)
return name, ctype
def _strip_ctype(name, ctype, protocol=2)
Strip the ctype from a channel name for the given nds server version This is needed because NDS1 servers store trend channels _including_ the suffix, but not raw channels, and NDS2 doesn't do this.
7.886587
6.101226
1.292623
# pylint: disable=unused-argument
from ..segments import (Segment, SegmentList, SegmentListDict)

connection.set_epoch(start, end)
# map user-given real names to NDS names
names = list(map(
    _get_nds2_name,
    find_channels(channels, epoch=(start, end), connection=connection,
                  unique=True),
))
# query for availability
result = connection.get_availability(names)
# map to segment types
out = SegmentListDict()
for name, result in zip(channels, result):
    out[name] = SegmentList([Segment(s.gps_start, s.gps_stop)
                             for s in result.simple_list()])
return out
def get_availability(channels, start, end, connection=None, host=None, port=None)
Query an NDS2 server for data availability Parameters ---------- channels : `list` of `str` list of channel names to query; this list is mapped to NDS channel names using :func:`find_channels`. start : `int` GPS start time of query end : `int` GPS end time of query connection : `nds2.connection`, optional open NDS2 connection to use for query host : `str`, optional name of NDS2 server to query, required if ``connection`` is not given port : `int`, optional port number on host to use for NDS2 connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs Raises ------ ValueError if the given channel name cannot be mapped uniquely to a name in the NDS server database. See also -------- nds2.connection.get_availability for documentation on the underlying query method
7.862071
5.877718
1.337606
if start % 60:
    start = int(start) // 60 * 60
if end % 60:
    end = int(end) // 60 * 60 + 60
return int(start), int(end)
def minute_trend_times(start, end)
Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60
2.415614
2.68673
0.899091
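A quick worked sketch of the rounding behaviour, following directly from the arithmetic above (assuming the function is imported from its host module):

>>> minute_trend_times(1230, 1300)
(1200, 1320)
>>> minute_trend_times(1200, 1260)  # already aligned, unchanged
(1200, 1260)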
try:
    return cls._member_map_[name]
except KeyError:
    for ctype in cls._member_map_.values():
        if ctype.name == name:
            return ctype
    raise ValueError('%s is not a valid %s' % (name, cls.__name__))
def find(cls, name)
Returns the NDS2 channel type corresponding to the given name
2.612319
2.620266
0.996967
try:
    return cls._member_map_[dtype]
except KeyError:
    try:
        dtype = numpy.dtype(dtype).type
    except TypeError:
        for ndstype in cls._member_map_.values():
            if ndstype.value is dtype:
                return ndstype
    else:
        for ndstype in cls._member_map_.values():
            if ndstype.value and ndstype.numpy_dtype is dtype:
                return ndstype
    raise ValueError('%s is not a valid %s' % (dtype, cls.__name__))
def find(cls, dtype)
Returns the NDS2 type corresponding to the given Python type
2.550763
2.507691
1.017176
if isinstance(connection, FflConnection):
    return type(connection)(connection.ffldir)
kw = {'context': connection._context} if connection.port != 80 else {}
return connection.__class__(connection.host, port=connection.port, **kw)
def reconnect(connection)
Open a new datafind connection based on an existing connection This is required because of https://git.ligo.org/lscsoft/glue/issues/1 Parameters ---------- connection : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection` a connection object (doesn't need to be open) Returns ------- newconn : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection` the new open connection to the same `host:port` server
7.569006
6.293011
1.202764
# if looking for a trend channel, prioritise the matching type
for trendname, trend_regex in [
        ('m-trend', MINUTE_TREND_TYPE),
        ('s-trend', SECOND_TREND_TYPE),
]:
    if trend == trendname and trend_regex.match(ftype):
        return 0, len(ftype)

# otherwise rank this type according to priority
for reg, prio in {
        HIGH_PRIORITY_TYPE: 1,
        re.compile(r'[A-Z]\d_C'): 6,
        LOW_PRIORITY_TYPE: 10,
        MINUTE_TREND_TYPE: 10,
        SECOND_TREND_TYPE: 10,
}.items():
    if reg.search(ftype):
        return prio, len(ftype)

return 5, len(ftype)
def _type_priority(ifo, ftype, trend=None)
Prioritise the given GWF type based on its name or trend status. This is essentially an ad-hoc ordering function based on internal knowledge of how LIGO does GWF type naming.
4.695883
4.963995
0.945989
for path in files:
    try:
        if os.stat(path).st_blocks == 0:
            return True
    except AttributeError:  # windows doesn't have st_blocks
        return False
return False
def on_tape(*files)
Determine whether any of the given files are on tape Parameters ---------- *files : `str` one or more paths to GWF files Returns ------- True/False : `bool` `True` if any of the files are determined to be on tape, otherwise `False`
4.934929
5.422453
0.910092
@wraps(func)
def wrapped(*args, **kwargs):
    if kwargs.get('connection') is None:
        kwargs['connection'] = _choose_connection(host=kwargs.get('host'),
                                                  port=kwargs.get('port'))
    try:
        return func(*args, **kwargs)
    except HTTPException:
        kwargs['connection'] = reconnect(kwargs['connection'])
        return func(*args, **kwargs)
return wrapped
def with_connection(func)
Decorate a function to open a new datafind connection if required This method will inspect the ``connection`` keyword, and if `None` (or missing), will use the ``host`` and ``port`` keywords to open a new connection and pass it as ``connection=<new>`` to ``func``.
2.623379
2.321572
1.130001
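A minimal sketch of how this decorator is typically applied; the function name `list_types` is hypothetical, while `find_types` mirrors the gwdatafind connection API referenced elsewhere in this section:

@with_connection
def list_types(observatory, connection=None, host=None, port=None):
    # 'connection' is guaranteed usable here: either passed by the caller,
    # or opened from 'host'/'port' by the decorator (and reopened once
    # on HTTPException)
    return connection.find_types(observatory)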
try:
    return find_frametype(channel, gpstime=(start, end),
                          frametype_match=frametype_match,
                          allow_tape=allow_tape, on_gaps='error',
                          connection=connection, host=host, port=port)
except RuntimeError:  # gaps (or something else went wrong)
    ftout = find_frametype(channel, gpstime=(start, end),
                           frametype_match=frametype_match,
                           return_all=True, allow_tape=allow_tape,
                           on_gaps='ignore', connection=connection,
                           host=host, port=port)
    try:
        if isinstance(ftout, dict):
            return {key: ftout[key][0] for key in ftout}
        return ftout[0]
    except IndexError:
        raise ValueError("Cannot find any valid frametypes for channel(s)")
def find_best_frametype(channel, start, end, frametype_match=None, allow_tape=True, connection=None, host=None, port=None)
Intelligently select the best frametype from which to read this channel

Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
    the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS start time of period of interest, any input parseable by
    `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
    GPS end time of period of interest, any input parseable by
    `~gwpy.time.to_gps` is fine
host : `str`, optional
    name of datafind host to use
port : `int`, optional
    port on datafind host to use
frametype_match : `str`, optional
    regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
    do not test types whose frame files are stored on tape (not on
    spinning disk)

Returns
-------
frametype : `str`
    the best matching frametype for the ``channel`` in the
    ``[start, end)`` interval

Raises
------
ValueError
    if no valid frametypes are found

Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
2.929047
3.363651
0.870794
return sorted(connection.find_types(observatory, match=match), key=lambda x: _type_priority(observatory, x, trend=trend))
def find_types(observatory, match=None, trend=None, connection=None, **connection_kw)
Find the available data types for a given observatory. See also -------- gwdatafind.http.HTTPConnection.find_types FflConnection.find_types for details on the underlying method(s)
5.354008
6.051663
0.884717
return connection.find_urls(observatory, frametype, start, end, on_gaps=on_gaps)
def find_urls(observatory, frametype, start, end, on_gaps='error', connection=None, **connection_kw)
Find the URLs of files of a given data type in a GPS interval. See also -------- gwdatafind.http.HTTPConnection.find_urls FflConnection.find_urls for details on the underlying method(s)
2.546567
3.690238
0.690082
try:
    return self.paths[(site, frametype)]
except KeyError:
    self._find_paths()
    return self.paths[(site, frametype)]
def ffl_path(self, site, frametype)
Returns the path of the FFL file for the given site and frametype Examples -------- >>> from gwpy.io.datafind import FflConnection >>> conn = FflConnection() >>> print(conn.ffl_path('V', 'V1Online')) /virgoData/ffl/V1Online.ffl
3.152627
4.571375
0.689645
self._find_paths()
types = [tag for (site_, tag) in self.paths if site in (None, site_)]
if match is not None:
    match = re.compile(match)
    return list(filter(match.search, types))
return types
def find_types(self, site=None, match=r'^(?!lastfile|spectro|\.).*')
Return the list of known data types. This is just the basename of each FFL file found in the FFL directory (minus the ``.ffl`` extension)
4.996024
5.349237
0.933969
span = Segment(gpsstart, gpsend)
cache = [e for e in self._read_ffl_cache(site, frametype)
         if e.observatory == site and e.description == frametype
         and e.segment.intersects(span)]
urls = [e.path for e in cache]
missing = SegmentList([span]) - cache_segments(cache)

if match:
    match = re.compile(match)
    urls = list(filter(match.search, urls))

# no missing data or don't care, return
if on_gaps == 'ignore' or not missing:
    return urls

# handle missing data
msg = 'Missing segments: \n{0}'.format('\n'.join(map(str, missing)))
if on_gaps == 'warn':
    warnings.warn(msg)
    return urls
raise RuntimeError(msg)
def find_urls(self, site, frametype, gpsstart, gpsend, match=None, on_gaps='warn')
Find all files of the given type in the [start, end) GPS interval.
4.784491
4.552074
1.051057
from ligo.lw import (
    ligolw,
    array as ligolw_array,
    param as ligolw_param,
)

@ligolw_array.use_in
@ligolw_param.use_in
class ArrayContentHandler(ligolw.LIGOLWContentHandler):
    pass

return ArrayContentHandler
def series_contenthandler()
Build a `~xml.sax.handlers.ContentHandler` to read a LIGO_LW <Array>
7.820472
5.028478
1.555236
from ligo.lw.ligolw import (LIGO_LW, Time, Array, Dim) from ligo.lw.param import get_param # read document xmldoc = read_ligolw(source, contenthandler=series_contenthandler()) # parse match dict if match is None: match = dict() def _is_match(elem): try: if elem.Name != name: return False except AttributeError: # Name is not set return False for key, value in match.items(): try: if get_param(elem, key).pcdata != value: return False except ValueError: # no Param with this Name return False return True # parse out correct element matches = filter(_is_match, xmldoc.getElementsByTagName(LIGO_LW.tagName)) try: elem, = matches except ValueError as exc: if not matches: exc.args = ("no LIGO_LW elements found matching request",) else: exc.args = ('multiple LIGO_LW elements found matching request, ' 'please consider using `match=` to select the ' 'correct element',) raise # get data array, = elem.getElementsByTagName(Array.tagName) # parse dimensions dims = array.getElementsByTagName(Dim.tagName) xdim = dims[0] x0 = xdim.Start dx = xdim.Scale xunit = xdim.Unit try: ndim = dims[1].n except IndexError: pass else: if ndim > 2: raise ValueError("Cannot parse LIGO_LW Array with {} " "dimensions".format(ndim)) # parse metadata array_kw = { 'name': array.Name, 'unit': array.Unit, 'xunit': xunit, } try: array_kw['epoch'] = to_gps( elem.getElementsByTagName(Time.tagName)[0].pcdata) except IndexError: pass for key in ('channel',): try: array_kw[key] = get_param(elem, key) except ValueError: pass # build Series try: xindex, value = array.array except ValueError: # not two dimensions stored return Series(array.array[0], x0=x0, dx=dx, **array_kw) return Series(value, xindex=xindex, **array_kw)
def read_series(source, name, match=None)
Read a `Series` from LIGO_LW-XML Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document` file path or open LIGO_LW-format XML file name : `str` name of the relevant `LIGO_LW` element to read match : `dict`, optional dict of (key, value) `Param` pairs to match correct LIGO_LW element, this is useful if a single file contains multiple `LIGO_LW` elements with the same name
4.313878
3.884148
1.110637
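A hypothetical call, assuming a file 'psd.xml.gz' that contains several LIGO_LW elements all named 'psd', each tagged with a 'channel' Param (file name and Param values are illustrative only):

>>> psd = read_series('psd.xml.gz', 'psd',
...                   match={'channel': 'H1:GDS-CALIB_STRAIN'})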
args = self.args fftlength = float(args.secpfft) overlap = args.overlap self.log(2, "Calculating spectrum secpfft: %s, overlap: %s" % (fftlength, overlap)) if overlap is not None: overlap *= fftlength self.log(3, 'Reference channel: ' + self.ref_chan) # group data by segment groups = OrderedDict() for series in self.timeseries: seg = series.span try: groups[seg][series.channel.name] = series except KeyError: groups[seg] = OrderedDict() groups[seg][series.channel.name] = series # -- plot plot = Plot(figsize=self.figsize, dpi=self.dpi) ax = plot.gca() self.spectra = [] # calculate coherence for seg in groups: refts = groups[seg].pop(self.ref_chan) for name in groups[seg]: series = groups[seg][name] coh = series.coherence(refts, fftlength=fftlength, overlap=overlap, window=args.window) label = name if len(self.start_list) > 1: label += ', {0}'.format(series.epoch.gps) if self.usetex: label = label_to_latex(label) ax.plot(coh, label=label) self.spectra.append(coh) if args.xscale == 'log' and not args.xmin: args.xmin = 1/fftlength return plot
def make_plot(self)
Generate the coherence plot from all time series
4.412125
4.201653
1.050092
leg = super(Coherence, self).set_legend()
if leg is not None:
    leg.set_title('Coherence with:')
return leg
def set_legend(self)
Create a legend for this product
5.859506
5.504852
1.064426
if name is None or isinstance(name, units.UnitBase):
    return name

try:  # have we already identified this unit as unrecognised?
    return UNRECOGNIZED_UNITS[name]
except KeyError:  # no, this is new
    # pylint: disable=unexpected-keyword-arg
    try:
        return units.Unit(name, parse_strict='raise')
    except ValueError as exc:
        if (parse_strict == 'raise'
                or 'did not parse as unit' not in str(exc)):
            raise
        # try again using our own lenient parser
        GWpyFormat.warn = parse_strict != 'silent'
        return units.Unit(name, parse_strict='silent', format=format)
    finally:
        GWpyFormat.warn = True
def parse_unit(name, parse_strict='warn', format='gwpy')
Attempt to intelligently parse a `str` as a `~astropy.units.Unit` Parameters ---------- name : `str` unit name to parse parse_strict : `str` one of 'silent', 'warn', or 'raise' depending on how pedantic you want the parser to be format : `~astropy.units.format.Base` the formatter class to use when parsing the unit string Returns ------- unit : `~astropy.units.UnitBase` the unit parsed by `~astropy.units.Unit` Raises ------ ValueError if the unit cannot be parsed and `parse_strict='raise'`
4.688336
4.955321
0.946122
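For example, a sketch of the strict path (the repr comes from astropy):

>>> parse_unit('m/s')
Unit("m / s")

With parse_strict='warn', an unrecognised string such as 'cnts' emits a warning and is returned via the lenient parser as a custom unit, rather than raising.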
# read params
params = dict(frevent.GetParam())
params['time'] = float(LIGOTimeGPS(*frevent.GetGTime()))
params['amplitude'] = frevent.GetAmplitude()
params['probability'] = frevent.GetProbability()
params['timeBefore'] = frevent.GetTimeBefore()
params['timeAfter'] = frevent.GetTimeAfter()
params['comment'] = frevent.GetComment()

# filter
if not all(op_(params[c], t) for c, op_, t in selection):
    return None

# return event as list
return [params[c] for c in columns]
def _row_from_frevent(frevent, columns, selection)
Generate a table row from an FrEvent Filtering (``selection``) is done here, rather than in the table reader, to enable filtering on columns that aren't being returned.
4.463645
4.449137
1.003261
# open frame file if isinstance(filename, FILE_LIKE): filename = filename.name stream = io_gwf.open_gwf(filename) # parse selections and map to column indices if selection is None: selection = [] selection = parse_column_filters(selection) # read events row by row data = [] i = 0 while True: try: frevent = stream.ReadFrEvent(i, name) except IndexError: break i += 1 # read first event to get column names if columns is None: columns = _columns_from_frevent(frevent) # read row with filter row = _row_from_frevent(frevent, columns, selection) if row is not None: # if passed selection data.append(row) return Table(rows=data, names=columns)
def table_from_gwf(filename, name, columns=None, selection=None)
Read a Table from FrEvent structures in a GWF file (or files) Parameters ---------- filename : `str` path of GWF file to read name : `str` name associated with the `FrEvent` structures columns : `list` of `str` list of column names to read selection : `str`, `list` of `str` one or more column selection strings to apply, e.g. ``'snr>6'``
5.125963
4.819983
1.063482
from LDAStools.frameCPP import (FrEvent, GPSTime) # create frame write_kw = {key: kwargs.pop(key) for key in ('compression', 'compression_level') if key in kwargs} frame = io_gwf.create_frame(name=name, **kwargs) # append row by row names = table.dtype.names for row in table: rowd = dict((n, row[n]) for n in names) gps = LIGOTimeGPS(rowd.pop('time', 0)) frame.AppendFrEvent(FrEvent( str(name), str(rowd.pop('comment', '')), str(rowd.pop('inputs', '')), GPSTime(gps.gpsSeconds, gps.gpsNanoSeconds), float(rowd.pop('timeBefore', 0)), float(rowd.pop('timeAfter', 0)), int(rowd.pop('eventStatus', 0)), float(rowd.pop('amplitude', 0)), float(rowd.pop('probability', -1)), str(rowd.pop('statistics', '')), list(rowd.items()), # remaining params as tuple )) # write frame to file io_gwf.write_frames(filename, [frame], **write_kw)
def table_to_gwf(table, filename, name, **kwargs)
Create a new `~frameCPP.FrameH` and fill it with data Parameters ---------- table : `~astropy.table.Table` the data to write filename : `str` the name of the file to write into **kwargs other keyword arguments (see below for references) See Also -------- gwpy.io.gwf.create_frame gwpy.io.gwf.write_frames for documentation of keyword arguments
5.382529
5.179337
1.039231
key = (data_format, data_class)
if key not in _FETCHERS or force:
    _FETCHERS[key] = (function, usage)
else:
    raise IORegistryError("Fetcher for format '{0}' and class '{1}' "
                          "has already been defined".format(
                              data_format, data_class))
_update__doc__(data_class)
def register_fetcher(data_format, data_class, function, force=False, usage=None)
Register a new method to EventTable.fetch() for a given format Parameters ---------- data_format : `str` name of the format to be registered data_class : `type` the class that the fetcher returns function : `callable` the method to call from :meth:`EventTable.fetch` force : `bool`, optional overwrite existing registration for ``data_format`` if found, default: `False`
3.762096
5.779483
0.65094
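A sketch of registering a hypothetical fetcher; the format name 'myformat' and the function body are illustrative only:

def fetch_my_events(*args, **kwargs):
    # ... query some service and build rows ...
    return EventTable(rows=[], names=[])

register_fetcher('myformat', EventTable, fetch_my_events,
                 usage="EventTable.fetch('myformat', ...)")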
# this is a copy of astropy.io.registry.get_reader
fetchers = [(fmt, cls) for fmt, cls in _FETCHERS if fmt == data_format]
for fetch_fmt, fetch_cls in fetchers:
    if io_registry._is_best_match(data_class, fetch_cls, fetchers):
        return _FETCHERS[(fetch_fmt, fetch_cls)][0]
else:
    formats = [fmt for fmt, cls in _FETCHERS
               if io_registry._is_best_match(fmt, cls, fetchers)]
    formatstr = '\n'.join(sorted(formats))
    raise IORegistryError(
        "No fetcher defined for format '{0}' and class '{1}'.\n"
        "The available formats are:\n{2}".format(
            data_format, data_class.__name__, formatstr))
def get_fetcher(data_format, data_class)
Return the :meth:`~EventTable.fetch` function for the given format

Parameters
----------
data_format : `str`
    name of the format
data_class : `type`
    the class that the fetcher returns

Raises
------
astropy.io.registry.IORegistryError
    if no registration is found matching ``data_format``
3.883294
3.7178
1.044514
return io_registry.read(cls, source, *args, **kwargs)
def read(cls, source, *args, **kwargs)
Read data into a `FrequencySeries` Arguments and keywords depend on the output format, see the online documentation for full details for each format, the parameters below are common to most formats. Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. *args Other arguments are (in general) specific to the given ``format``. format : `str`, optional Source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. **kwargs Other keywords are (in general) specific to the given ``format``. Notes -----
7.224488
14.005297
0.51584
from ..timeseries import TimeSeries

nout = (self.size - 1) * 2
# Undo normalization from TimeSeries.fft
# The DC component does not have the factor of two applied,
# so we account for it here
dift = npfft.irfft(self.value * nout) / 2
new = TimeSeries(dift, epoch=self.epoch, channel=self.channel,
                 unit=self.unit, dx=1/self.dx/nout)
return new
def ifft(self)
Compute the one-dimensional discrete inverse Fourier transform of this `FrequencySeries`. Returns ------- out : :class:`~gwpy.timeseries.TimeSeries` the normalised, real-valued `TimeSeries`. See Also -------- :mod:`scipy.fftpack` for the definition of the DFT and conventions used. Notes ----- This method applies the necessary normalisation such that the condition holds: >>> timeseries = TimeSeries([1.0, 0.0, -1.0, 0.0], sample_rate=1.0) >>> timeseries.fft().ifft() == timeseries
10.082074
9.952062
1.013064
f0 = self.f0.decompose().value
N = (self.size - 1) * (self.df.decompose().value / df) + 1
fsamples = numpy.arange(0, numpy.rint(N), dtype=self.dtype) * df + f0
out = type(self)(numpy.interp(fsamples, self.frequencies.value,
                              self.value))
out.__array_finalize__(self)
out.f0 = f0
out.df = df
return out
def interpolate(self, df)
Interpolate this `FrequencySeries` to a new resolution. Parameters ---------- df : `float` desired frequency resolution of the interpolated `FrequencySeries`, in Hz Returns ------- out : `FrequencySeries` the interpolated version of the input `FrequencySeries` See Also -------- numpy.interp for the underlying 1-D linear interpolation scheme
4.80054
4.24673
1.130409
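A short worked example (assuming `FrequencySeries` is imported from `gwpy.frequencyseries`); the output size follows from N = (size - 1) * (df_old / df_new) + 1:

>>> fs = FrequencySeries([1., 2., 3.], f0=0, df=1)
>>> fs.interpolate(0.5).size  # (3 - 1) * (1 / 0.5) + 1
5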
from ..utils.lal import from_lal_unit

try:
    unit = from_lal_unit(lalfs.sampleUnits)
except TypeError:
    unit = None
channel = Channel(lalfs.name, unit=unit, dtype=lalfs.data.data.dtype)
return cls(lalfs.data.data, channel=channel, f0=lalfs.f0,
           df=lalfs.deltaF, epoch=float(lalfs.epoch),
           dtype=lalfs.data.data.dtype, copy=copy)
def from_lal(cls, lalfs, copy=True)
Generate a new `FrequencySeries` from a LAL `FrequencySeries` of any type
3.408876
3.40365
1.001535
return cls(fs.data, f0=0, df=fs.delta_f, epoch=fs.epoch, copy=copy)
def from_pycbc(cls, fs, copy=True)
Convert a `pycbc.types.frequencyseries.FrequencySeries` into a `FrequencySeries` Parameters ---------- fs : `pycbc.types.frequencyseries.FrequencySeries` the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries` array copy : `bool`, optional, default: `True` if `True`, copy these data to a new array Returns ------- spectrum : `FrequencySeries` a GWpy version of the input frequency series
7.924445
11.517562
0.688031
from pycbc import types

if self.epoch is None:
    epoch = None
else:
    epoch = self.epoch.gps
return types.FrequencySeries(self.value,
                             delta_f=self.df.to('Hz').value,
                             epoch=epoch, copy=copy)
def to_pycbc(self, copy=True)
Convert this `FrequencySeries` into a `~pycbc.types.frequencyseries.FrequencySeries` Parameters ---------- copy : `bool`, optional, default: `True` if `True`, copy these data to a new array Returns ------- frequencyseries : `pycbc.types.frequencyseries.FrequencySeries` a PyCBC representation of this `FrequencySeries`
5.816616
6.000271
0.969392
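A round-trip sketch between the two types (assumes pycbc is installed):

>>> from gwpy.frequencyseries import FrequencySeries
>>> gwpy_fs = FrequencySeries([1., 2., 3.], df=0.25, epoch=0)
>>> pycbc_fs = gwpy_fs.to_pycbc()                # GWpy -> PyCBC
>>> back = FrequencySeries.from_pycbc(pycbc_fs)  # PyCBC -> GWpy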
cls = kwargs.pop('cls', TimeSeries) cache = kwargs.pop('cache', None) verbose = kwargs.pop('verbose', False) # match file format if url.endswith('.gz'): ext = os.path.splitext(url[:-3])[-1] else: ext = os.path.splitext(url)[-1] if ext == '.hdf5': kwargs.setdefault('format', 'hdf5.losc') elif ext == '.txt': kwargs.setdefault('format', 'ascii.losc') elif ext == '.gwf': kwargs.setdefault('format', 'gwf') with _download_file(url, cache, verbose=verbose) as rem: # get channel for GWF if not given if ext == ".gwf" and (not args or args[0] is None): args = (_gwf_channel(rem, cls, kwargs.get("verbose")),) if verbose: print('Reading data...', end=' ') try: series = cls.read(rem, *args, **kwargs) except Exception as exc: if verbose: print('') exc.args = ("Failed to read LOSC data from %r: %s" % (url, str(exc)),) raise else: # parse bits from unit in GWF if ext == '.gwf' and isinstance(series, StateVector): try: bits = {} for bit in str(series.unit).split(): a, b = bit.split(':', 1) bits[int(a)] = b series.bits = bits series.override_unit('') except (TypeError, ValueError): # don't care, bad LOSC pass if verbose: print('[Done]') return series
def _fetch_losc_data_file(url, *args, **kwargs)
Internal function for fetching a single LOSC file and returning a Series
3.803736
3.758825
1.011948
segments = set()
for path in files:
    seg = file_segment(path)
    for s in segments:
        if seg.intersects(s):
            return True
    segments.add(seg)
return False
def _overlapping(files)
Quick method to see if a file list contains overlapping files
3.38067
2.986307
1.132057
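For example, assuming `file_segment` parses LIGO-T050017-style names (`OBS-TAG-START-DURATION.ext`) into GPS segments:

>>> _overlapping(['H-TEST-0-10.gwf', 'H-TEST-10-10.gwf'])  # abutting
False
>>> _overlapping(['H-TEST-0-10.gwf', 'H-TEST-5-10.gwf'])   # overlapping
True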
# format arguments start = to_gps(start) end = to_gps(end) span = Segment(start, end) kwargs.update({ 'start': start, 'end': end, }) # find URLs (requires gwopensci) url_kw = {key: kwargs.pop(key) for key in GWOSC_LOCATE_KWARGS if key in kwargs} if 'sample_rate' in url_kw: # format as Hertz url_kw['sample_rate'] = Quantity(url_kw['sample_rate'], 'Hz').value cache = get_urls(detector, int(start), int(ceil(end)), **url_kw) # if event dataset, pick shortest file that covers the request # -- this is a bit hacky, and presumes that only an event dataset # -- would be produced with overlapping files. # -- This should probably be improved to use dataset information if len(cache) and _overlapping(cache): cache.sort(key=lambda x: abs(file_segment(x))) for url in cache: a, b = file_segment(url) if a <= start and b >= end: cache = [url] break if kwargs.get('verbose', False): # get_urls() guarantees len(cache) >= 1 host = urlparse(cache[0]).netloc print("Fetched {0} URLs from {1} for [{2} .. {3}))".format( len(cache), host, int(start), int(ceil(end)))) is_gwf = cache[0].endswith('.gwf') if is_gwf and len(cache): args = (kwargs.pop('channel', None),) else: args = () # read data out = None kwargs['cls'] = cls for url in cache: keep = file_segment(url) & span new = _fetch_losc_data_file(url, *args, **kwargs).crop( *keep, copy=False) if is_gwf and (not args or args[0] is None): args = (new.name,) if out is None: out = new.copy() else: out.append(new, resize=True) return out
def fetch_losc_data(detector, start, end, cls=TimeSeries, **kwargs)
Fetch LOSC data for a given detector This function is for internal purposes only, all users should instead use the interface provided by `TimeSeries.fetch_open_data` (and similar for `StateVector.fetch_open_data`).
5.297528
5.405903
0.979953
dataset = io_hdf5.find_dataset(h5f, path)
# read data
nddata = dataset[()]
# read metadata
xunit = parse_unit(dataset.attrs['Xunits'])
epoch = dataset.attrs['Xstart']
dt = Quantity(dataset.attrs['Xspacing'], xunit)
unit = dataset.attrs['Yunits']
# build and return
return TimeSeries(nddata, epoch=epoch, sample_rate=(1/dt).to('Hertz'),
                  unit=unit, name=path.rsplit('/', 1)[1],
                  copy=copy).crop(start=start, end=end)
def read_losc_hdf5(h5f, path='strain/Strain', start=None, end=None, copy=False)
Read a `TimeSeries` from a LOSC-format HDF file. Parameters ---------- h5f : `str`, `h5py.HLObject` path of HDF5 file, or open `H5File` path : `str` name of HDF5 dataset to read. Returns ------- data : `~gwpy.timeseries.TimeSeries` a new `TimeSeries` containing the data read from disk
4.878158
5.326177
0.915884
# find data
dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
# read data
nddata = dataset[()]
bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
# read metadata
epoch = dataset.attrs['Xstart']
try:
    dt = dataset.attrs['Xspacing']
except KeyError:
    dt = Quantity(1, 's')
else:
    xunit = parse_unit(dataset.attrs['Xunits'])
    dt = Quantity(dt, xunit)
return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
                   dx=dt, copy=copy).crop(start=start, end=end)
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None, copy=False)
Read a `StateVector` from a LOSC-format HDF file.

Parameters
----------
f : `str`, `h5py.HLObject`
    path of HDF5 file, or open `H5File`
path : `str`
    path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
    start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
    end GPS time of desired data
copy : `bool`, default: `False`
    create a fresh-memory copy of the underlying array

Returns
-------
data : `~gwpy.timeseries.StateVector`
    a new `StateVector` containing the data read from disk
5.265726
5.65732
0.930781
channels = list(io_gwf.iter_channel_names(file_path(path)))
if issubclass(series_class, StateVector):
    regex = DQMASK_CHANNEL_REGEX
else:
    regex = STRAIN_CHANNEL_REGEX
found, = list(filter(regex.match, channels))
if verbose:
    print("Using channel {0!r}".format(found))
return found
def _gwf_channel(path, series_class=TimeSeries, verbose=False)
Find the right channel name for a LOSC GWF file
6.632591
6.197589
1.070189
# read file path
if isinstance(source, string_types):
    with open(source, 'r') as fobj:
        return from_segwizard(fobj, gpstype=gpstype, strict=strict)

# read file object
out = SegmentList()
fmt_pat = None
for line in source:
    if line.startswith(('#', ';')):  # comment
        continue
    # determine line format
    if fmt_pat is None:
        fmt_pat = _line_format(line)
    # parse line
    tokens, = fmt_pat.findall(line)
    out.append(_format_segment(tokens[-3:], gpstype=gpstype,
                               strict=strict))
return out
def from_segwizard(source, gpstype=LIGOTimeGPS, strict=True)
Read segments from a segwizard format file into a `SegmentList` Parameters ---------- source : `file`, `str` An open file, or file path, from which to read gpstype : `type`, optional The numeric type to which to cast times (from `str`) when reading. strict : `bool`, optional Check that recorded duration matches ``end-start`` for all segments; only used when reading from a 3+-column file. Returns ------- segments : `~gwpy.segments.SegmentList` The list of segments as parsed from the file. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3.
3.657171
3.691353
0.99074
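A sketch of parsing a small four-column file from memory, using `gpstype=float` to avoid the LIGOTimeGPS dependency (the token layout assumes the four-column regex captures seg/start/stop/duration):

import io

text = "# seg\tstart\tstop\tduration\n0\t100\t160\t60\n"
segs = from_segwizard(io.StringIO(text), gpstype=float)
# segs -> SegmentList([Segment(100.0, 160.0)])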
for pat in (FOUR_COL_REGEX, THREE_COL_REGEX, TWO_COL_REGEX):
    if pat.match(line):
        return pat
raise ValueError("unable to parse segment from line {!r}".format(line))
def _line_format(line)
Determine the column format pattern for a line in an ASCII segment file.
5.431307
4.455885
1.218906
try:
    start, end, dur = tokens
except ValueError:  # two columns
    return Segment(*map(gpstype, tokens))
seg = Segment(gpstype(start), gpstype(end))
if strict and not float(abs(seg)) == float(dur):
    raise ValueError(
        "segment {0!r} has incorrect duration {1!r}".format(seg, dur),
    )
return seg
def _format_segment(tokens, strict=True, gpstype=LIGOTimeGPS)
Format a list of tokens parsed from an ASCII file into a segment.
4.769601
4.690275
1.016913
# write file path
if isinstance(target, string_types):
    with open(target, 'w') as fobj:
        return to_segwizard(segs, fobj, header=header, coltype=coltype)

# write file object
if header:
    print('# seg\tstart\tstop\tduration', file=target)
for i, seg in enumerate(segs):
    a = coltype(seg[0])
    b = coltype(seg[1])
    c = float(b - a)
    print('\t'.join(map(str, (i, a, b, c))), file=target)
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS)
Write the given `SegmentList` to a file in SegWizard format. Parameters ---------- segs : :class:`~gwpy.segments.SegmentList` The list of segments to write. target : `file`, `str` An open file, or file path, to which to write. header : `bool`, optional Print a column header into the file, default: `True`. coltype : `type`, optional The numerical type in which to cast times before printing. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3.
2.753995
2.708867
1.016659
def identify(origin, filepath, fileobj, *args, **kwargs):
    # pylint: disable=unused-argument
    if (isinstance(filepath, string_types)
            and filepath.endswith(extensions)):
        return True
    return False
return identify
def identify_factory(*extensions)
Factory function to create I/O identifiers for a set of extensions

The returned function is designed for use in the unified I/O registry
via the `astropy.io.registry.register_identifier` hook.

Parameters
----------
extensions : `str`
    one or more file extension strings

Returns
-------
identifier : `callable`
    an identifier function that tests whether an incoming file path
    carries any of the given file extensions (using `str.endswith`)
5.234177
5.506593
0.950529
# filename declares gzip
if name.endswith('.gz'):
    return gzip.open(name, *args, **kwargs)

# open regular file
fobj = open(name, *args, **kwargs)
sig = fobj.read(3)
fobj.seek(0)
if sig == GZIP_SIGNATURE:  # file signature declares gzip
    fobj.close()  # GzipFile won't close orig file when it closes
    return gzip.open(name, *args, **kwargs)
return fobj
def gopen(name, *args, **kwargs)
Open a file, handling optional gzip compression

If ``name`` ends with ``'.gz'``, or if the GZIP file signature is
found at the beginning of the file, the file will be opened with
`gzip.open`, otherwise a regular file will be returned from `open`.

Parameters
----------
name : `str`
    path (name) of file to open.
*args, **kwargs
    other arguments to pass to either `open` for regular files,
    or `gzip.open` for gzipped files.

Returns
-------
file : `io.TextIOBase`, `file`, `gzip.GzipFile`
    the open file object
4.532834
4.287165
1.057303
# open a cache file and return list of paths
if (isinstance(flist, string_types)
        and flist.endswith(('.cache', '.lcf', '.ffl'))):
    from .cache import read_cache
    return read_cache(flist)

# separate comma-separated list of names
if isinstance(flist, string_types):
    return flist.split(',')

# parse list of entries (of some format)
if isinstance(flist, (list, tuple)):
    return list(map(file_path, flist))

# otherwise parse a single entry
try:
    return [file_path(flist)]
except ValueError as exc:
    exc.args = (
        "Could not parse input {!r} as one or more "
        "file-like objects".format(flist),
    )
    raise
def file_list(flist)
Parse a number of possible input types into a list of filepaths. Parameters ---------- flist : `file-like` or `list-like` iterable the input data container, normally just a single file path, or a list of paths, but can generally be any of the following - `str` representing a single file path (or comma-separated collection) - open `file` or `~gzip.GzipFile` object - :class:`~lal.utils.CacheEntry` - `str` with ``.cache`` or ``.lcf`` extension - simple `list` or `tuple` of `str` paths Returns ------- files : `list` `list` of `str` file paths Raises ------ ValueError if the input `flist` cannot be interpreted as any of the above inputs
4.101853
3.77699
1.086011
if isinstance(fobj, string_types) and fobj.startswith("file:"):
    return urlparse(fobj).path
if isinstance(fobj, string_types):
    return fobj
if isinstance(fobj, FILE_LIKE) and hasattr(fobj, "name"):
    return fobj.name
try:
    return fobj.path
except AttributeError:
    raise ValueError("Cannot parse file name for {!r}".format(fobj))
def file_path(fobj)
Determine the path of a file.

This doesn't do any sanity checking to check that the file
actually exists, or is readable.

Parameters
----------
fobj : `file`, `str`, `CacheEntry`, ...
    the file object or path to parse

Returns
-------
path : `str`
    the path of the underlying file

Raises
------
ValueError
    if a file path cannot be determined

Examples
--------
>>> from gwpy.io.utils import file_path
>>> file_path("test.txt")
'test.txt'
>>> file_path(open("test.txt", "r"))
'test.txt'
>>> file_path("file:///home/user/test.txt")
'/home/user/test.txt'
2.833912
3.204296
0.88441
while True:
    # pick an item out of the input queue
    idx, arg = q_in.get()
    if idx is None:  # sentinel
        break
    # execute method and put the result in the output queue
    q_out.put((idx, func(arg)))
def process_in_out_queues(func, q_in, q_out)
Iterate through a queue, calling ``func`` on each item, and queue the
result

Parameters
----------
func : `callable`
    any function that can take an element of the input `Queue` as
    the only argument
q_in : `multiprocessing.queue.Queue`
    the input `Queue`
q_out : `multiprocessing.queue.Queue`
    the output `Queue`

Notes
-----
To close the input `Queue`, add ``(None, None)`` as the last item
5.730982
7.522145
0.761881
if nproc != 1 and os.name == 'nt':
    warnings.warn(
        "multiprocessing is currently not supported on Windows, see "
        "https://github.com/gwpy/gwpy/issues/880, will continue with "
        "serial processing (nproc=1)")
    nproc = 1

if progress_kw.pop('raise_exceptions', None) is not None:
    warnings.warn("the `raise_exceptions` keyword to "
                  "multiprocess_with_queues is deprecated, and will be "
                  "removed in a future release, all exceptions will be "
                  "raised if they occur", DeprecationWarning)

# create progress bar for verbose output
if bool(verbose):
    if not isinstance(verbose, bool):
        progress_kw['desc'] = str(verbose)
    if isinstance(inputs, (list, tuple)):
        progress_kw.setdefault('total', len(inputs))
    pbar = progress_bar(**progress_kw)
else:
    pbar = None

# -------------------------------------------

def _inner(x):
    try:
        return func(x)
    except Exception as exc:  # pylint: disable=broad-except
        if nproc == 1:
            raise
        return exc
    finally:
        if pbar and nproc == 1:
            pbar.update()

# -------------------------------------------

# shortcut single process
if nproc == 1:
    return list(map(_inner, inputs))

# create input and output queues
q_in = Queue()
q_out = Queue()

# create child processes and start
proclist = [Process(target=process_in_out_queues,
                    args=(_inner, q_in, q_out)) for _ in range(nproc)]
for proc in proclist:
    proc.daemon = True
    proc.start()

# populate queue (no need to block in serial put())
sent = [q_in.put(x, block=False) for x in enumerate(inputs)]
for _ in range(nproc):  # add sentinel for each process
    q_in.put((None, None))

# get results
res = []
for _ in range(len(sent)):
    x = q_out.get()
    if pbar:
        pbar.update()
    res.append(x)

# close processes and unwrap results
for proc in proclist:
    proc.join()
if pbar:
    pbar.close()

# unwrap results in order
results = [out for _, out in sorted(res, key=itemgetter(0))]

# raise exceptions here
for res in results:
    if isinstance(res, Exception):
        raise res

return results
def multiprocess_with_queues(nproc, func, inputs, verbose=False, **progress_kw)
Map a function over a list of inputs using multiprocessing

This essentially duplicates `multiprocessing.Pool.map`, but allows for
arbitrary functions (that aren't necessarily importable)

Parameters
----------
nproc : `int`
    number of processes to use, if ``1`` is given, the current process
    is used, and no child processes are forked
func : `callable`
    the function to call in each iteration, should take a single
    argument that is the next element from ``inputs``
inputs : `iterable`
    iterable (e.g. `list`) of inputs, each element of which is passed
    to ``func`` in one of the child processes
verbose : `bool`, `str`, optional
    if `True`, print progress to the console as a bar, pass a `str`
    to customise the heading for the progress bar, default: `False`
    (default heading ``'Processing:'`` if ``verbose=True``)

Returns
-------
outputs : `list`
    the `list` of results from calling ``func(x)`` for each element
    of ``inputs``
3.019
3.066027
0.984662
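A minimal usage sketch; on POSIX, where child processes are forked, the worker does not need to be importable:

def _square(x):
    return x ** 2

multiprocess_with_queues(2, _square, [1, 2, 3, 4])
# -> [1, 4, 9, 16]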
from astropy.utils.data import get_readable_fileobj import json from six.moves.urllib.error import HTTPError from six.moves import urllib # Need to build the url call for the restful API base = 'https://gravityspytools.ciera.northwestern.edu' + \ '/search/similarity_search_restful_API' map_era_to_url = { 'ALL': "event_time BETWEEN 1126400000 AND 1229176818", 'O1': "event_time BETWEEN 1126400000 AND 1137250000", 'ER10': "event_time BETWEEN 1161907217 AND 1164499217", 'O2a': "event_time BETWEEN 1164499217 AND 1219276818", 'ER13': "event_time BETWEEN 1228838418 AND 1229176818", } parts = { 'howmany': howmany, 'imageid': gravityspy_id, 'era': map_era_to_url[era], 'ifo': "{}".format(", ".join( map(repr, [ifos[i:i+2] for i in range(0, len(ifos), 2)]), )), 'database': 'updated_similarity_index_v2d0', } search = urllib.parse.urlencode(parts) url = '{}/?{}'.format(base, search) try: with get_readable_fileobj(url, remote_timeout=remote_timeout) as f: return GravitySpyTable(json.load(f)) except HTTPError as exc: if exc.code == 500: exc.msg += ', confirm the gravityspy_id is valid' raise
def search(cls, gravityspy_id, howmany=10, era='ALL', ifos='H1L1', remote_timeout=20)
Perform a REST API version of the search available at:

https://gravityspytools.ciera.northwestern.edu/search/

Parameters
----------
gravityspy_id : `str`
    the unique 10-character hash that identifies a Gravity Spy image
howmany : `int`, optional, default: 10
    number of similar images you would like

Returns
-------
`GravitySpyTable` containing similar events based on an evaluation of
the Euclidean distance of the input image to all other images in some
feature space
4.116562
3.825676
1.076035
try:
    if self._epoch is None:
        return None
    return Time(*modf(self._epoch)[::-1], format='gps', scale='utc')
except AttributeError:
    self._epoch = None
    return self._epoch
def epoch(self)
GPS epoch associated with these data :type: `~astropy.time.Time`
7.514794
5.100288
1.473406
self._unit = parse_unit(unit, parse_strict=parse_strict)
def override_unit(self, unit, parse_strict='raise')
Forcefully reset the unit of these data Use of this method is discouraged in favour of `to()`, which performs accurate conversions from one unit to another. The method should really only be used when the original unit of the array is plain wrong. Parameters ---------- unit : `~astropy.units.Unit`, `str` the unit to force onto this array parse_strict : `str`, optional how to handle errors in the unit parsing, default is to raise the underlying exception from `astropy.units` Raises ------ ValueError if a `str` cannot be parsed as a valid unit
4.275752
6.707603
0.637449
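For example, a sketch using a `TimeSeries` (whose unit handling goes through this method), assuming it is imported from `gwpy.timeseries`:

>>> data = TimeSeries([1., 2., 3.], unit='m')
>>> data.override_unit('V')
>>> data.unit
Unit("V")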
return super(Array, self).flatten(order=order).view(Quantity)
def flatten(self, order='C')
Return a copy of the array collapsed into one dimension. Any index information is removed as part of the flattening, and the result is returned as a `~astropy.units.Quantity` array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : `~astropy.units.Quantity` A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = Array([[1,2], [3,4]], unit='m', name='Test') >>> a.flatten() <Quantity [1., 2., 3., 4.] m>
10.179667
15.38058
0.661852
# check sampling rates
if ts1.sample_rate.to('Hertz') != ts2.sample_rate.to('Hertz'):
    sampling = min(ts1.sample_rate.value, ts2.sample_rate.value)
    # resample higher rate series
    if ts1.sample_rate.value == sampling:
        ts2 = ts2.resample(sampling)
    else:
        ts1 = ts1.resample(sampling)
else:
    sampling = ts1.sample_rate.value

# format FFT parameters
if fftlength is None:
    fftlength = stride
if overlap is None:
    overlap = 0

nstride = int(stride * sampling)

# get size of spectrogram
nsteps = int(ts1.size // nstride)
nfreqs = int(fftlength * sampling // 2 + 1)

# generate output spectrogram
out = Spectrogram(zeros((nsteps, nfreqs)), epoch=ts1.epoch, dt=stride,
                  f0=0, df=1/fftlength, copy=True, unit='coherence')
if not nsteps:
    return out

# stride through TimeSeries, recording coherence spectra
# as columns of the spectrogram
for step in range(nsteps):
    # find step TimeSeries
    idx = nstride * step
    idx_end = idx + nstride
    stepseries1 = ts1[idx:idx_end]
    stepseries2 = ts2[idx:idx_end]
    stepcoh = stepseries1.coherence(stepseries2, fftlength=fftlength,
                                    overlap=overlap, window=window,
                                    **kwargs)
    out.value[step] = stepcoh.value

return out
def _from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None, window=None, **kwargs)
Generate a time-frequency coherence
:class:`~gwpy.spectrogram.Spectrogram` from a pair of
:class:`~gwpy.timeseries.TimeSeries`.

For each ``stride``, a coherence
:class:`~gwpy.frequencyseries.FrequencySeries` is generated, with all
resulting spectra stacked in time and returned.
3.584124
3.463315
1.034882
# format FFT parameters if fftlength is None: fftlength = stride / 2. # get size of spectrogram nsteps = int(ts1.size // (stride * ts1.sample_rate.value)) nproc = min(nsteps, nproc) # single-process return if nsteps == 0 or nproc == 1: return _from_timeseries(ts1, ts2, stride, fftlength=fftlength, overlap=overlap, window=window, **kwargs) # wrap spectrogram generator def _specgram(queue_, tsa, tsb): try: queue_.put(_from_timeseries(tsa, tsb, stride, fftlength=fftlength, overlap=overlap, window=window, **kwargs)) except Exception as exc: # pylint: disable=broad-except queue_.put(exc) # otherwise build process list stepperproc = int(ceil(nsteps / nproc)) nsamp = [stepperproc * ts.sample_rate.value * stride for ts in (ts1, ts2)] queue = ProcessQueue(nproc) processlist = [] for i in range(nproc): process = Process(target=_specgram, args=(queue, ts1[i * nsamp[0]:(i + 1) * nsamp[0]], ts2[i * nsamp[1]:(i + 1) * nsamp[1]])) process.daemon = True processlist.append(process) process.start() if ((i + 1) * nsamp[0]) >= ts1.size: break # get data data = [] for process in processlist: result = queue.get() if isinstance(result, Exception): raise result else: data.append(result) # and block for process in processlist: process.join() # format and return out = SpectrogramList(*data) out.sort(key=lambda spec: spec.epoch.gps) return out.join()
def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None, window=None, nproc=1, **kwargs)
Calculate the coherence `Spectrogram` between two `TimeSeries`.

Parameters
----------
ts1, ts2 : :class:`~gwpy.timeseries.TimeSeries`
    input time-series to process.
stride : `float`
    number of seconds in single PSD (column of spectrogram).
fftlength : `float`
    number of seconds in single FFT.
overlap : `float`, optional
    number of seconds of overlap between FFTs, defaults to no overlap
window : `timeseries.window.Window`, optional, default: `None`
    window function to apply to timeseries prior to FFT.
nproc : `int`, default: ``1``
    maximum number of independent frame reading processes, default
    is set to single-process file reading.

Returns
-------
spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
    time-frequency power spectrogram as generated from the
    input time-series.
3.008722
3.086318
0.974858
# read file(s) config = configparser.ConfigParser(dict_type=OrderedDict) source = file_list(source) success_ = config.read(*source) if len(success_) != len(source): raise IOError("Failed to read one or more CLF files") # create channel list out = ChannelList() out.source = source append = out.append # loop over all groups and channels for group in config.sections(): params = OrderedDict(config.items(group)) channels = params.pop('channels').strip('\n').split('\n') if 'flow' in params or 'fhigh' in params: low = params.pop('flow', 0) high = params.pop('fhigh', inf) if isinstance(high, string_types) and high.lower() == 'nyquist': high = inf frange = float(low), float(high) else: frange = None for channel in channels: try: match = CHANNEL_DEFINITION.match(channel).groupdict() except AttributeError as exc: exc.args = ('Cannot parse %r as channel list entry' % channel,) raise # remove Nones from match match = dict((k, v) for k, v in match.items() if v is not None) match.setdefault('safe', 'safe') match.setdefault('fidelity', 'clean') # create channel and copy group params safe = match.get('safe', 'safe').lower() != 'unsafe' channel = Channel(match.pop('name'), frequency_range=frange, safe=safe, sample_rate=match.pop('sample_rate')) channel.params = params.copy() channel.params.update(match) channel.group = group # extract those params for which the Channel has an attribute for key in ['frametype']: setattr(channel, key, channel.params.pop(key, None)) append(channel) return out
def read_channel_list_file(*source)
Read a `~gwpy.detector.ChannelList` from a Channel List File
3.944593
3.907049
1.009609
if not isinstance(fobj, FILE_LIKE):
    with open(fobj, "w") as fobj:
        return write_channel_list_file(channels, fobj)

out = configparser.ConfigParser(dict_type=OrderedDict)
for channel in channels:
    group = channel.group
    if not out.has_section(group):
        out.add_section(group)
    for param, value in channel.params.items():
        out.set(group, param, value)
    if channel.sample_rate is not None:
        entry = '%s %s' % (str(channel),
                           str(channel.sample_rate.to('Hz').value))
    else:
        entry = str(channel)
    entry += ' %s' % channel.params.get('safe', 'safe')
    entry += ' %s' % channel.params.get('fidelity', 'clean')
    try:
        clist = out.get(group, 'channels')
    except configparser.NoOptionError:
        out.set(group, 'channels', '\n%s' % entry)
    else:
        out.set(group, 'channels', clist + '\n%s' % entry)
out.write(fobj)
def write_channel_list_file(channels, fobj)
Write a `~gwpy.detector.ChannelList` to an INI-format channel list file
2.652512
2.58579
1.025803
# warn about deprecated functions
if deprecated:
    func = deprecated_function(
        func,
        "the {0!r} PSD method is deprecated, and will be removed "
        "in a future release, please consider using {1!r} instead".format(
            name, name.split('-', 1)[1],
        ),
    )
if name is None:
    name = func.__name__
name = _format_name(name)
METHODS[name] = func
return name
def register_method(func, name=None, deprecated=False)
Register a method of calculating an average spectrogram. Parameters ---------- func : `callable` function to execute name : `str`, optional name of the method, defaults to ``func.__name__`` deprecated : `bool`, optional whether this method is deprecated (`True`) or not (`False`) Returns ------- name : `str` the registered name of the function, which may differ pedantically from what was given by the user.
4.668211
4.781676
0.976271
# find method
name = _format_name(name)
try:
    return METHODS[name]
except KeyError as exc:
    exc.args = ("no PSD method registered with name {0!r}".format(name),)
    raise
def get_method(name)
Return the PSD method registered with the given name.
5.831781
4.076223
1.430683
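For example, assuming the standard 'welch' estimator has been registered via register_method:

>>> get_method('welch')  # doctest: +ELLIPSIS
<function welch at 0x...>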
# compute chirp mass and symmetric mass ratio
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
mchirp = (mass1 * mass2) ** (3/5.) / mtotal ** (1/5.)

# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')

# calculate integral pre-factor
prefactor = (
    (1.77 ** 2 * 5 * constants.c ** (1/3.)
     * (mchirp * constants.G / constants.c ** 2) ** (5/3.))
    / (96 * pi ** (4/3.) * snr ** 2)
)

# calculate inspiral range ASD in m^2/Hz
integrand = 1 / psd * psd.frequencies ** (-7/3.) * prefactor

# restrict to ISCO
integrand = integrand[psd.frequencies.value < fisco.value]

# normalize and return
if integrand.f0.value == 0.0:
    integrand[0] = 0.0
if horizon:
    integrand *= 2.26 ** 2
return integrand.to('Mpc^2 / Hz')
def inspiral_range_psd(psd, snr=8, mass1=1.4, mass2=1.4, horizon=False)
Compute the inspiral sensitive distance PSD from a GW strain PSD Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` mass1 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the first binary component, default: `1.4` mass2 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the second binary component, default: `1.4` horizon : `bool`, optional if `True`, return the maximal 'horizon' sensitive distance, otherwise return the angle-averaged range, default: `False` Returns ------- rspec : `~gwpy.frequencyseries.FrequencySeries` the calculated inspiral sensitivity PSD [Mpc^2 / Hz]
5.060131
5.051022
1.001803
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2

# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')

# format frequency limits
fmax = units.Quantity(fmax or fisco, 'Hz')
if fmax > fisco:
    warnings.warn("Upper frequency bound greater than %s-%s ISCO "
                  "frequency of %s, using ISCO" % (mass1, mass2, fisco))
    fmax = fisco
if fmin is None:
    fmin = psd.df  # avoid using 0 as lower limit
fmin = units.Quantity(fmin, 'Hz')

# integrate
f = psd.frequencies.to('Hz')
condition = (f >= fmin) & (f < fmax)
integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
                               mass2=mass2, horizon=horizon)
result = units.Quantity(
    integrate.trapz(integrand.value, f.value[condition]),
    unit=integrand.unit * units.Hertz)

return (result ** (1/2.)).to('Mpc')
def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None, horizon=False)
Calculate the inspiral sensitive distance from a GW strain PSD

The method returns the distance (in megaparsecs) to which a compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:

https://dcc.ligo.org/LIGO-T030276/public

Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
    the instrumental power-spectral-density data
snr : `float`, optional
    the signal-to-noise ratio for which to calculate range,
    default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
    the mass (`float` assumed in solar masses) of the first binary
    component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
    the mass (`float` assumed in solar masses) of the second binary
    component, default: `1.4`
fmin : `float`, optional
    the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
    the maximum frequency limit of the integral, defaults to
    innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
    if `True`, return the maximal 'horizon' sensitive distance,
    otherwise return the angle-averaged range, default: `False`

Returns
-------
range : `~astropy.units.Quantity`
    the calculated inspiral range [Mpc]

Examples
--------
Grab some data for LIGO-Hanford around GW150914 and generate a PSD

>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)

Now we can calculate the :func:`inspiral_range`:

>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc
4.042645
4.352987
0.928706
# calculate the frequency-dependent range (converted to Mpc below)
a = (constants.G * energy * constants.M_sun * 0.4 /
     (pi**2 * constants.c))**(1/2.)
dspec = psd ** (-1/2.) * a / (snr * psd.frequencies)

# convert to output unit
rspec = dspec.to('Mpc')

# reset the 0 Hertz bin (which always has zero range)
if rspec.f0.value == 0.0:
    rspec[0] = 0.0

return rspec
def burst_range_spectrum(psd, snr=8, energy=1e-2)
Calculate the frequency-dependent GW burst range from a strain PSD Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` energy : `float`, optional the relative energy output of the GW burst, default: `0.01` (GRB-like burst) Returns ------- rangespec : `~gwpy.frequencyseries.FrequencySeries` the burst range `FrequencySeries` [Mpc (default)]
9.596503
9.049432
1.060454
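As above, a hedged sketch calling the spectrum function on a toy flat PSD; the input values are illustrative only:

import numpy
from gwpy.frequencyseries import FrequencySeries

# toy flat strain PSD, for illustration only
psd = FrequencySeries(1e-46 * numpy.ones(500), df=1, unit='1/Hz')
rangespec = burst_range_spectrum(psd, snr=8, energy=1e-2)
print(rangespec.unit)  # Mpc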
freqs = psd.frequencies.value # restrict integral if not fmin: fmin = psd.f0 if not fmax: fmax = psd.span[1] condition = (freqs >= fmin) & (freqs < fmax) # calculate integrand and integrate integrand = burst_range_spectrum( psd[condition], snr=snr, energy=energy) ** 3 result = integrate.trapz(integrand.value, freqs[condition]) # normalize and return r = units.Quantity(result / (fmax - fmin), unit=integrand.unit) ** (1/3.) return r.to('Mpc')
def burst_range(psd, snr=8, energy=1e-2, fmin=100, fmax=500)
Calculate the integrated GRB-like GW burst range from a strain PSD

Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
    the instrumental power-spectral-density data

snr : `float`, optional
    the signal-to-noise ratio for which to calculate range,
    default: ``8``

energy : `float`, optional
    the relative energy output of the GW burst, defaults to ``1e-2``
    for a GRB-like burst

fmin : `float`, optional
    the lower frequency cutoff of the burst range integral,
    default: ``100 Hz``

fmax : `float`, optional
    the upper frequency cutoff of the burst range integral,
    default: ``500 Hz``

Returns
-------
range : `~astropy.units.Quantity`
    the GRB-like-burst sensitive range [Mpc (default)]

Examples
--------
Grab some data for LIGO-Hanford around GW150914 and generate a PSD

>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)

Now we can calculate the :func:`burst_range`:

>>> from gwpy.astro import burst_range
>>> r = burst_range(hoff, fmin=30)
>>> print(r)
42.5055584195 Mpc
4.674922
5.2975
0.882477
# this method is more complicated than it need be to # support matplotlib-1.x. # for matplotlib-2.x this would just be # h, s, v = colors.rgb_to_hsv(colors.to_rgb(c)) # v *= factor # return colors.hsv_to_rgb((h, s, v)) rgb = numpy.array(to_rgb(col), ndmin=3) hsv = colors.rgb_to_hsv(rgb) hsv[-1][-1][2] *= factor return colors.hsv_to_rgb(hsv)[-1][-1]
def tint(col, factor=1.0)
Tint a color by scaling its HSV brightness ('value') by ``factor``, returning a new RGB array; ``factor < 1`` makes the color darker
4.081615
3.921462
1.04084
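A quick check of the helper above; the import path is an assumption about where this function lives:

from gwpy.plot.colors import tint  # assumed import path

print(tint('red', factor=0.5))  # ~[0.5, 0., 0.], a darker red
print(tint('red'))              # factor=1. returns the color unchanged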
norm = kwargs.pop('norm', current) or 'linear' vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) clim = kwargs.pop('clim', (vmin, vmax)) or (None, None) clip = kwargs.pop('clip', None) if norm == 'linear': norm = colors.Normalize() elif norm == 'log': norm = colors.LogNorm() elif not isinstance(norm, colors.Normalize): raise ValueError("unrecognised value for norm {!r}".format(norm)) for attr, value in (('vmin', clim[0]), ('vmax', clim[1]), ('clip', clip)): if value is not None: setattr(norm, attr, value) return norm, kwargs
def format_norm(kwargs, current=None)
Format a `~matplotlib.colors.Normalize` from a set of kwargs Returns ------- norm, kwargs the formatted `Normalize` instance, and the remaining keywords
2.263534
2.213627
1.022545
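A short sketch of the keyword handling above, verifying that colouring keywords are consumed and the rest are passed through:

from matplotlib import colors

kwargs = {'norm': 'log', 'vmin': 1e-3, 'vmax': 1., 'cmap': 'viridis'}
norm, kwargs = format_norm(kwargs)
assert isinstance(norm, colors.LogNorm)
assert (norm.vmin, norm.vmax) == (1e-3, 1.)
assert kwargs == {'cmap': 'viridis'}  # only non-colouring keywords remain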
# pylint: disable=unused-argument # try and read file descriptor if fileobj is not None: loc = fileobj.tell() fileobj.seek(0) try: if fileobj.read(4) == GWF_SIGNATURE: return True finally: fileobj.seek(loc) if filepath is not None: if filepath.endswith('.gwf'): return True if filepath.endswith(('.lcf', '.cache')): try: cache = read_cache(filepath) except IOError: return False else: if cache[0].path.endswith('.gwf'): return True
def identify_gwf(origin, filepath, fileobj, *args, **kwargs)
Identify a filename or file object as GWF This function is overloaded in that it will also identify a cache file as 'gwf' if the first entry in the cache contains a GWF file extension
3.364292
3.240291
1.038269
if mode not in ('r', 'w'): raise ValueError("mode must be either 'r' or 'w'") from LDAStools import frameCPP filename = urlparse(filename).path # strip file://localhost or similar if mode == 'r': return frameCPP.IFrameFStream(str(filename)) return frameCPP.OFrameFStream(str(filename))
def open_gwf(filename, mode='r')
Open a filename for reading or writing GWF format data

Parameters
----------
filename : `str`
    the path to read from, or write to

mode : `str`, optional
    either ``'r'`` (read) or ``'w'`` (write)

Returns
-------
`LDAStools.frameCPP.IFrameFStream`
    the input frame stream (if ``mode='r'``), or

`LDAStools.frameCPP.OFrameFStream`
    the output frame stream (if ``mode='w'``)
8.878717
4.672173
1.90034
from LDAStools import frameCPP # open stream stream = open_gwf(filename, 'w') # write frames one-by-one if isinstance(frames, frameCPP.FrameH): frames = [frames] for frame in frames: stream.WriteFrame(frame, compression, compression_level)
def write_frames(filename, frames, compression=257, compression_level=6)
Write a list of frame objects to a file **Requires:** |LDAStools.frameCPP|_ Parameters ---------- filename : `str` path to write into frames : `list` of `LDAStools.frameCPP.FrameH` list of frames to write into file compression : `int`, optional enum value for compression scheme, default is ``GZIP`` compression_level : `int`, optional compression level for given scheme
9.080189
4.491893
2.021461
from LDAStools import frameCPP # create frame frame = frameCPP.FrameH() # add timing gps = to_gps(time) gps = frameCPP.GPSTime(gps.gpsSeconds, gps.gpsNanoSeconds) frame.SetGTime(gps) if duration is not None: frame.SetDt(float(duration)) # add FrDetectors for prefix in ifos or []: idx = getattr(frameCPP, 'DETECTOR_LOCATION_%s' % prefix) frame.AppendFrDetector(frameCPP.GetDetector(idx, gps)) # add descriptions frame.SetName(name) frame.SetRun(run) return frame
def create_frame(time=0, duration=None, name='gwpy', run=-1, ifos=None)
Create a new :class:`~LDAStools.frameCPP.FrameH`

**Requires:** |LDAStools.frameCPP|_

Parameters
----------
time : `float`, optional
    frame start time in GPS seconds

duration : `float`, optional
    frame length in seconds

name : `str`, optional
    name of project or other experiment description

run : `int`, optional
    run number (number < 0 reserved for simulated data); monotonic
    for experimental runs

ifos : `list`, optional
    list of interferometer prefixes (e.g. ``'L1'``) associated with
    this frame

Returns
-------
frame : :class:`~LDAStools.frameCPP.FrameH`
    the newly created frame header
8.385499
6.054511
1.385
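A hedged end-to-end sketch tying together create_frame, write_frames, and open_gwf; it requires |LDAStools.frameCPP|_ and uses an illustrative output path:

# sketch only: requires LDAStools.frameCPP
frame = create_frame(time=1126259462, duration=4, name='demo', ifos=['L1'])
write_frames('demo.gwf', frame)  # a single FrameH is wrapped in a list
stream = open_gwf('demo.gwf')    # re-open the new file for reading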
channel = str(channel) for name, type_ in _iter_channels(framefile): if channel == name: return type_ raise ValueError("%s not found in table-of-contents for %s" % (channel, framefile))
def get_channel_type(channel, framefile)
Find the channel type in a given GWF file **Requires:** |LDAStools.frameCPP|_ Parameters ---------- channel : `str`, `~gwpy.detector.Channel` name of data channel to find framefile : `str` path of GWF file in which to search Returns ------- ctype : `str` the type of the channel ('adc', 'sim', or 'proc') Raises ------ ValueError if the channel is not found in the table-of-contents
4.826603
5.224605
0.923822
channel = str(channel) for name in iter_channel_names(framefile): if channel == name: return True return False
def channel_in_frame(channel, framefile)
Determine whether a channel is stored in this framefile **Requires:** |LDAStools.frameCPP|_ Parameters ---------- channel : `str` name of channel to find framefile : `str` path of GWF file to test Returns ------- inframe : `bool` whether this channel is included in the table of contents for the given framefile
4.288503
8.247094
0.520002
from LDAStools import frameCPP if not isinstance(framefile, frameCPP.IFrameFStream): framefile = open_gwf(framefile, 'r') toc = framefile.GetTOC() for typename in ('Sim', 'Proc', 'ADC'): typen = typename.lower() for name in getattr(toc, 'Get{0}'.format(typename))(): yield name, typen
def _iter_channels(framefile)
Yields the name and type of each channel in a GWF file TOC **Requires:** |LDAStools.frameCPP|_ Parameters ---------- framefile : `str`, `LDAStools.frameCPP.IFrameFStream` path of GWF file, or open file stream, to read
13.967729
6.033597
2.314992
segments = SegmentList() for path in paths: segments.extend(_gwf_channel_segments(path, channel, warn=warn)) return segments.coalesce()
def data_segments(paths, channel, warn=True)
Returns the segments containing data for a channel **Requires:** |LDAStools.frameCPP|_ A frame is considered to contain data if a valid FrData structure (of any type) exists for the channel in that frame. No checks are directly made against the underlying FrVect structures. Parameters ---------- paths : `list` of `str` a list of GWF file paths channel : `str` the name to check in each frame warn : `bool`, optional emit a `UserWarning` when a channel is not found in a frame Returns ------- segments : `~gwpy.segments.SegmentList` the list of segments containing data
6.283658
7.919232
0.793468
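A usage sketch, assuming hypothetical GWF paths and a hypothetical channel name purely for illustration:

# hypothetical paths and channel name
paths = ['L-TEST-1126259460-4.gwf', 'L-TEST-1126259464-4.gwf']
segs = data_segments(paths, 'L1:TEST-CHANNEL', warn=False)
print(segs)  # coalesced SegmentList of intervals containing data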
stream = open_gwf(path) # get segments for frames toc = stream.GetTOC() secs = toc.GetGTimeS() nano = toc.GetGTimeN() dur = toc.GetDt() readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title())) for type_ in ("proc", "sim", "adc")] # for each segment, try and read the data for this channel for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)): for read in readers: try: read(i, channel) except (IndexError, ValueError): continue readers = [read] # use this one from now on epoch = LIGOTimeGPS(s, ns) yield Segment(epoch, epoch + dt) break else: # none of the readers worked for this channel, warn if warn: warnings.warn( "{0!r} not found in frame {1} of {2}".format( channel, i, path), )
def _gwf_channel_segments(path, channel, warn=True)
Yields the segments containing data for ``channel`` in this GWF path
7.205949
6.913675
1.042275
# parse keyword args inplace = kwargs.pop('inplace', False) analog = kwargs.pop('analog', False) fs = kwargs.pop('sample_rate', None) if kwargs: raise TypeError("filter() got an unexpected keyword argument '%s'" % list(kwargs.keys())[0]) # parse filter if fs is None: fs = 2 * (data.shape[-1] * data.df).to('Hz').value form, filt = parse_filter(filt, analog=analog, sample_rate=fs) lti = signal.lti(*filt) # generate frequency response freqs = data.frequencies.value.copy() fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1])) # apply to array if inplace: data *= fresp return data new = data * fresp return new
def fdfilter(data, *filt, **kwargs)
Filter a frequency-domain data object See Also -------- gwpy.frequencyseries.FrequencySeries.filter gwpy.spectrogram.Spectrogram.filter
4.283721
4.455705
0.961401
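A hedged sketch of filtering a toy spectrum with an analogue ZPK filter; the zeros, poles, and gain are illustrative values only:

import numpy
from gwpy.frequencyseries import FrequencySeries

fs = FrequencySeries(numpy.ones(129), df=1)  # toy flat spectrum
filtered = fdfilter(fs, [100], [1], 1e-2, analog=True)  # zeros, poles, gain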
# define command line arguments parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("-V", "--version", action="version", version=__version__, help="show version number and exit") parser.add_argument("-l", "--local", action="store_true", default=False, help="print datetimes in local timezone") parser.add_argument("-f", "--format", type=str, action="store", default=r"%Y-%m-%d %H:%M:%S.%f %Z", help="output datetime format (default: %(default)r)") parser.add_argument("input", help="GPS or datetime string to convert", nargs="*") # parse and convert args = parser.parse_args(args) input_ = " ".join(args.input) output = tconvert(input_) # print (now with timezones!) if isinstance(output, datetime.datetime): output = output.replace(tzinfo=tz.tzutc()) if args.local: output = output.astimezone(tz.tzlocal()) print(output.strftime(args.format)) else: print(output)
def main(args=None)
Parse command-line arguments, tconvert inputs, and print
2.778881
2.599796
1.068884
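The entry point can also be exercised programmatically, since the positional arguments are joined with spaces before conversion:

main(['1126259462'])         # GPS -> formatted datetime
main(['Sep', '14', '2015'])  # date string -> LIGOTimeGPS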
name = func.__name__ @wraps(func) def timed_func(self, *args, **kwargs): # pylint: disable=missing-docstring _start = time.time() out = func(self, *args, **kwargs) self.log(2, '{0} took {1:.1f} sec'.format(name, time.time() - _start)) return out return timed_func
def timer(func)
Time a method and print its duration after return
2.461591
2.489992
0.988594
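The decorator assumes the host object provides a ``log(level, message)`` method; a minimal sketch:

import time

class Worker(object):
    def log(self, level, message):  # minimal logger expected by @timer
        print(message)

    @timer
    def crunch(self):
        time.sleep(0.1)

Worker().crunch()  # logs something like 'crunch took 0.1 sec'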
def converter(x):
    """Convert the input to a `float` in %s
    """
    return Quantity(x, unit).value
converter.__doc__ %= str(unit)  # pylint: disable=no-member
return converter
def to_float(unit)
Factory to build a converter from quantity string to float

Examples
--------
>>> conv = to_float('Hz')
>>> conv('4 mHz')
0.004
10.211293
18.016273
0.566782
@wraps(func) def decorated_func(*args, **kwargs): norm, kwargs = format_norm(kwargs) kwargs['norm'] = norm return func(*args, **kwargs) return decorated_func
def log_norm(func)
Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
2.738856
2.76063
0.992113
@wraps(func) def wrapped_func(self, left=None, right=None, **kw): if right is None and numpy.iterable(left): left, right = left kw['left'] = left kw['right'] = right gpsscale = self.get_xscale() in GPS_SCALES for key in ('left', 'right'): if gpsscale: try: kw[key] = numpy.longdouble(str(to_gps(kw[key]))) except TypeError: pass return func(self, **kw) return wrapped_func
def xlim_as_gps(func)
Wrap ``func`` to pass limit inputs through `gwpy.time.to_gps`
3.262382
3.170543
1.028966
@wraps(func) def wrapped_func(self, *args, **kwargs): grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor, self.yaxis._gridOnMinor, self.yaxis._gridOnMajor) try: return func(self, *args, **kwargs) finally: # reset grid self.xaxis.grid(grid[0], which="minor") self.xaxis.grid(grid[1], which="major") self.yaxis.grid(grid[2], which="minor") self.yaxis.grid(grid[3], which="major") return wrapped_func
def restore_grid(func)
Wrap ``func`` to preserve the current grid settings of the `Axes`.
2.02543
1.86913
1.083621
scale = self.get_xscale() return self.set_xscale(scale, epoch=epoch)
def set_epoch(self, epoch)
Set the epoch for the current GPS scale. This method will fail if the current X-axis scale isn't one of the GPS scales. See :ref:`gwpy-plot-gps` for more details. Parameters ---------- epoch : `float`, `str` GPS-compatible time or date object, anything parseable by :func:`~gwpy.time.to_gps` is fine.
7.446944
8.522492
0.873799
if isinstance(array, Array2D): return self._imshow_array2d(array, *args, **kwargs) image = super(Axes, self).imshow(array, *args, **kwargs) self.autoscale(enable=None, axis='both', tight=None) return image
def imshow(self, array, *args, **kwargs)
Display an image, i.e. data on a 2D regular raster. If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a :class:`~gwpy.spectrogram.Spectrogram`), then the defaults are _different_ to those in the upstream :meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are - ``origin='lower'`` (coordinates start in lower-left corner) - ``aspect='auto'`` (pixels are not forced to be square) - ``interpolation='none'`` (no image interpolation is used) In all other usage, the defaults from the upstream matplotlib method are unchanged. Parameters ---------- array : array-like or PIL image The image data. *args, **kwargs All arguments and keywords are passed to the inherited :meth:`~matplotlib.axes.Axes.imshow` method. See Also -------- matplotlib.axes.Axes.imshow for details of the image rendering
3.443123
4.066292
0.846747
# NOTE: If you change the defaults for this method, please update # the docstring for `imshow` above. # calculate extent extent = tuple(array.xspan) + tuple(array.yspan) if self.get_xscale() == 'log' and extent[0] == 0.: extent = (1e-300,) + extent[1:] if self.get_yscale() == 'log' and extent[2] == 0.: extent = extent[:2] + (1e-300,) + extent[3:] kwargs.setdefault('extent', extent) return self.imshow(array.value.T, origin=origin, aspect=aspect, interpolation=interpolation, **kwargs)
def _imshow_array2d(self, array, origin='lower', interpolation='none', aspect='auto', **kwargs)
Render an `~gwpy.types.Array2D` using `Axes.imshow`
3.164022
3.082758
1.026361
if len(args) == 1 and isinstance(args[0], Array2D): return self._pcolormesh_array2d(*args, **kwargs) return super(Axes, self).pcolormesh(*args, **kwargs)
def pcolormesh(self, *args, **kwargs)
Create a pseudocolor plot with a non-regular rectangular grid. When using GWpy, this method can be called with a single argument that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y`` coordinate arrays will be determined from the indexing. In all other usage, all ``args`` and ``kwargs`` are passed directly to :meth:`~matplotlib.axes.Axes.pcolormesh`. Notes ----- Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`, this method respects the current grid settings. See Also -------- matplotlib.axes.Axes.pcolormesh
2.505238
3.093839
0.809751
x = numpy.concatenate((array.xindex.value, array.xspan[-1:])) y = numpy.concatenate((array.yindex.value, array.yspan[-1:])) xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True) return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
def _pcolormesh_array2d(self, array, *args, **kwargs)
Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
3.006495
2.757515
1.090291
alpha = kwargs.pop('alpha', .1) # plot mean line, = self.plot(data, **kwargs) out = [line] # modify keywords for shading kwargs.update({ 'label': '', 'linewidth': line.get_linewidth() / 2, 'color': line.get_color(), 'alpha': alpha * 2, }) # plot lower and upper Series fill = [data.xindex.value, data.value, data.value] for i, bound in enumerate((lower, upper)): if bound is not None: out.extend(self.plot(bound, **kwargs)) fill[i+1] = bound.value # fill between out.append(self.fill_between( *fill, alpha=alpha, color=kwargs['color'], rasterized=kwargs.get('rasterized', True))) return out
def plot_mmm(self, data, lower=None, upper=None, **kwargs)
Plot a `Series` as a line, with a shaded region around it.

The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.

All three `Series` should have the same `~Series.index` array.

Parameters
----------
data : `~gwpy.types.Series`
    Data to plot normally.

lower : `~gwpy.types.Series`
    Lower boundary (on Y-axis) for shade.

upper : `~gwpy.types.Series`
    Upper boundary (on Y-axis) for shade.

**kwargs
    Any other keyword arguments acceptable for
    :meth:`~matplotlib.axes.Axes.plot`.

Returns
-------
artists : `tuple`
    All of the drawn artists:

    - `~matplotlib.lines.Line2D` for ``data``,
    - `~matplotlib.lines.Line2D` for ``lower``, if given
    - `~matplotlib.lines.Line2D` for ``upper``, if given
    - `~matplotlib.collections.PolyCollection` for shading

See Also
--------
matplotlib.axes.Axes.plot
    for a full description of acceptable ``*args`` and ``**kwargs``
3.998598
4.449451
0.898672
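A usage sketch with toy data and arbitrary +/- 0.5 bounds, assuming gwpy's Plot provides these Axes:

import numpy
from gwpy.timeseries import TimeSeries
from gwpy.plot import Plot

data = TimeSeries(numpy.sin(numpy.linspace(0, 10, 101)), dt=0.1)
ax = Plot().gca()
artists = ax.plot_mmm(data, lower=data - 0.5, upper=data + 0.5)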
# get color and sort if color is not None and kwargs.get('c_sort', True): sortidx = color.argsort() x = x[sortidx] y = y[sortidx] w = w[sortidx] h = h[sortidx] color = color[sortidx] # define how to make a polygon for each tile if anchor == 'll': def _poly(x, y, w, h): return ((x, y), (x, y+h), (x+w, y+h), (x+w, y)) elif anchor == 'lr': def _poly(x, y, w, h): return ((x-w, y), (x-w, y+h), (x, y+h), (x, y)) elif anchor == 'ul': def _poly(x, y, w, h): return ((x, y-h), (x, y), (x+w, y), (x+w, y-h)) elif anchor == 'ur': def _poly(x, y, w, h): return ((x-w, y-h), (x-w, y), (x, y), (x, y-h)) elif anchor == 'center': def _poly(x, y, w, h): return ((x-w/2., y-h/2.), (x-w/2., y+h/2.), (x+w/2., y+h/2.), (x+w/2., y-h/2.)) else: raise ValueError("Unrecognised tile anchor {!r}".format(anchor)) # build collection cmap = kwargs.pop('cmap', rcParams['image.cmap']) coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)), edgecolors=edgecolors, linewidth=linewidth, **kwargs) if color is not None: coll.set_array(color) coll.set_cmap(cmap) out = self.add_collection(coll) self.autoscale_view() return out
def tile(self, x, y, w, h, color=None, anchor='center', edgecolors='face', linewidth=0.8, **kwargs)
Plot rectangular tiles onto these `Axes`.

``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.

Parameters
----------
x, y, w, h : `array_like`, shape (n, )
    Input data

color : `array_like`, shape (n, )
    Array of amplitudes for tile color

anchor : `str`, optional
    Anchor point for tiles relative to ``(x, y)`` coordinates, one of

    - ``'center'`` - center tile on ``(x, y)``
    - ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
    - ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
    - ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
    - ``'ur'`` - ``(x, y)`` defines upper-right corner of tile

**kwargs
    Other keywords are passed to
    :class:`~matplotlib.collections.PolyCollection`

Returns
-------
collection : `~matplotlib.collections.PolyCollection`
    the collection of tiles drawn

Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot  # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
1.788723
1.78403
1.002631
fig = self.get_figure() if kwargs.get('use_axesgrid', True): kwargs.setdefault('fraction', 0.) if kwargs.get('fraction', 0.) == 0.: kwargs.setdefault('use_axesgrid', True) mappable, kwargs = gcbar.process_colorbar_kwargs( fig, mappable=mappable, ax=self, **kwargs) if isinstance(fig, Plot): # either we have created colorbar Axes using axesgrid1, or # the user already gave use_axesgrid=False, so we forcefully # disable axesgrid here in case fraction == 0., which causes # gridspec colorbars to fail. kwargs['use_axesgrid'] = False return fig.colorbar(mappable, **kwargs)
def colorbar(self, mappable=None, **kwargs)
Add a `~matplotlib.colorbar.Colorbar` to these `Axes` Parameters ---------- mappable : matplotlib data collection, optional collection against which to map the colouring, default will be the last added mappable artist (collection or image) fraction : `float`, optional fraction of space to steal from these `Axes` to make space for the new axes, default is ``0.`` if ``use_axesgrid=True`` is given (default), otherwise default is ``.15`` to match the upstream matplotlib default. **kwargs other keyword arguments to be passed to the :meth:`Plot.colorbar` generator Returns ------- cbar : `~matplotlib.colorbar.Colorbar` the newly added `Colorbar` See Also -------- Plot.colorbar
6.439205
6.399065
1.006273
# convert from GPS into datetime
try:
    float(gpsordate)  # if we can 'float' it, then it's probably a GPS time
except (TypeError, ValueError):
    return to_gps(gpsordate)
return from_gps(gpsordate)
def tconvert(gpsordate='now')
Convert GPS times to ISO-format date-times and vice-versa.

Parameters
----------
gpsordate : `float`, `astropy.time.Time`, `datetime.datetime`, ...
    input gps or date to convert, many input types are supported

Returns
-------
date : `datetime.datetime` or `LIGOTimeGPS`
    converted gps or date

Notes
-----
If the input object is a `float` or `LIGOTimeGPS`, it will get
converted from GPS format into a `datetime.datetime`, otherwise
the input will be converted into `LIGOTimeGPS`.

Examples
--------
Integers and floats are automatically converted from GPS to
`datetime.datetime`:

>>> from gwpy.time import tconvert
>>> tconvert(0)
datetime.datetime(1980, 1, 6, 0, 0)
>>> tconvert(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)

while strings are automatically converted to `~gwpy.time.LIGOTimeGPS`:

>>> tconvert('Sep 14 2015 09:50:45.391')
LIGOTimeGPS(1126259462, 391000000)

Additionally, a few special-case words are supported, which all return
`~gwpy.time.LIGOTimeGPS`:

>>> tconvert('now')
>>> tconvert('today')
>>> tconvert('tomorrow')
>>> tconvert('yesterday')
7.033786
8.021016
0.87692
# -- convert input to Time, or something we can pass to LIGOTimeGPS if isinstance(t, string_types): try: # if str represents a number, leave it for LIGOTimeGPS to handle float(t) except ValueError: # str -> datetime.datetime t = _str_to_datetime(t) # tuple -> datetime.datetime if isinstance(t, (tuple, list)): t = datetime.datetime(*t) # datetime.datetime -> Time if isinstance(t, datetime.date): t = _datetime_to_time(t) # Quantity -> float if isinstance(t, Quantity): t = t.to('second').value # Number/Decimal -> str if isinstance(t, Decimal): t = str(t) if isinstance(t, Number): # note, on python < 3, str(<float>) isn't very good, so we use repr # for python > 3 we can just use str for both Decimal and Number t = repr(t) # -- convert to LIGOTimeGPS if isinstance(t, Time): return _time_to_gps(t, *args, **kwargs) try: return LIGOTimeGPS(t) except (TypeError, ValueError): return LIGOTimeGPS(float(t))
def to_gps(t, *args, **kwargs)
Convert any input date/time into a `LIGOTimeGPS`.

Any input object that can be cast as a `~astropy.time.Time`
(with `str` going through `datetime.datetime`) is acceptable.

Parameters
----------
t : `float`, `~datetime.datetime`, `~astropy.time.Time`, `str`
    the input time, any object that can be converted into a
    `LIGOTimeGPS`, `~astropy.time.Time`, or `~datetime.datetime`,
    is acceptable.

*args, **kwargs
    other arguments to pass to `~astropy.time.Time` if given

Returns
-------
gps : `LIGOTimeGPS`
    the number of GPS seconds (non-integer) since the start of the
    epoch (January 6 1980).

Raises
------
TypeError
    if a `str` input cannot be parsed as a `datetime.datetime`.

ValueError
    if the input cannot be cast as a `~astropy.time.Time` or
    `LIGOTimeGPS`.

Examples
--------
>>> to_gps('Jan 1 2017')
LIGOTimeGPS(1167264018, 0)
>>> to_gps('Sep 14 2015 09:50:45.391')
LIGOTimeGPS(1126259462, 391000000)

>>> import datetime
>>> to_gps(datetime.datetime(2017, 1, 1))
LIGOTimeGPS(1167264018, 0)

>>> from astropy.time import Time
>>> to_gps(Time(57754, format='mjd'))
LIGOTimeGPS(1167264018, 0)
4.204109
4.305482
0.976455
try: gps = LIGOTimeGPS(gps) except (ValueError, TypeError, RuntimeError): gps = LIGOTimeGPS(float(gps)) sec, nano = gps.gpsSeconds, gps.gpsNanoSeconds date = Time(sec, format='gps', scale='utc').datetime return date + datetime.timedelta(microseconds=nano*1e-3)
def from_gps(gps)
Convert a GPS time into a `datetime.datetime`. Parameters ---------- gps : `LIGOTimeGPS`, `int`, `float` GPS time to convert Returns ------- datetime : `datetime.datetime` ISO-format datetime equivalent of input GPS time Examples -------- >>> from_gps(1167264018) datetime.datetime(2017, 1, 1, 0, 0) >>> from_gps(1126259462.3910) datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
4.346214
4.287741
1.013637
# try known string try: return DATE_STRINGS[str(datestr).lower()]() except KeyError: # any other string pass # use maya try: import maya return maya.when(datestr).datetime() except ImportError: pass # use dateutil.parse with warnings.catch_warnings(): # don't allow lazy passing of time-zones warnings.simplefilter("error", RuntimeWarning) try: return dateparser.parse(datestr) except RuntimeWarning: raise ValueError("Cannot parse date string with timezone " "without maya, please install maya") except (ValueError, TypeError) as exc: # improve error reporting exc.args = ("Cannot parse date string {0!r}: {1}".format( datestr, exc.args[0]),) raise
def _str_to_datetime(datestr)
Convert `str` to `datetime.datetime`.
5.029822
4.940498
1.01808
time = time.utc date = time.datetime micro = date.microsecond if isinstance(date, datetime.datetime) else 0 return LIGOTimeGPS(int(time.gps), int(micro*1e3))
def _time_to_gps(time)
Convert a `Time` into `LIGOTimeGPS`.

This method uses `datetime.datetime` underneath, which restricts
to microsecond precision by design. This should probably be fixed...

Parameters
----------
time : `~astropy.time.Time`
    formatted `Time` object to convert

Returns
-------
gps : `LIGOTimeGPS`
    the converted time, held in nanosecond fields but limited to
    microsecond precision by the conversion
6.509067
5.566547
1.169319
if isinstance(filename, (h5py.Group, h5py.Dataset)): return filename if isinstance(filename, FILE_LIKE): return h5py.File(filename.name, mode) return h5py.File(filename, mode)
def open_hdf5(filename, mode='r')
Wrapper to open a :class:`h5py.File` from disk, gracefully handling a few corner cases
2.53637
2.73873
0.926112
@wraps(func) def decorated_func(fobj, *args, **kwargs): # pylint: disable=missing-docstring if not isinstance(fobj, h5py.HLObject): if isinstance(fobj, FILE_LIKE): fobj = fobj.name with h5py.File(fobj, 'r') as h5f: return func(h5f, *args, **kwargs) return func(fobj, *args, **kwargs) return decorated_func
def with_read_hdf5(func)
Decorate an HDF5-reading function to open a filepath if needed ``func`` should be written to presume an `h5py.Group` as the first positional argument.
2.405732
2.514167
0.95687
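A minimal sketch of the decorator in use; 'data.h5' is a hypothetical path:

@with_read_hdf5
def read_attrs(h5g):
    # receives an open h5py.Group whether given a path or a group
    return dict(h5g.attrs)

attrs = read_attrs('data.h5')  # hypothetical file path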
# find dataset if isinstance(h5o, h5py.Dataset): return h5o elif path is None and len(h5o) == 1: path = list(h5o.keys())[0] elif path is None: raise ValueError("Please specify the HDF5 path via the " "``path=`` keyword argument") return h5o[path]
def find_dataset(h5o, path=None)
Find and return the relevant dataset inside the given H5 object If ``path=None`` is given, and ``h5o`` contains a single dataset, that will be returned Parameters ---------- h5o : `h5py.File`, `h5py.Group` the HDF5 object in which to search path : `str`, optional the path (relative to ``h5o``) of the desired data set Returns ------- dset : `h5py.Dataset` the recovered dataset object Raises ------ ValueError if ``path=None`` and the HDF5 object contains multiple datasets KeyError if ``path`` is given but is not found within the HDF5 object
2.874604
3.287447
0.874418
@wraps(func) def decorated_func(obj, fobj, *args, **kwargs): # pylint: disable=missing-docstring if not isinstance(fobj, h5py.HLObject): append = kwargs.get('append', False) overwrite = kwargs.get('overwrite', False) if os.path.exists(fobj) and not (overwrite or append): raise IOError("File exists: %s" % fobj) with h5py.File(fobj, 'a' if append else 'w') as h5f: return func(obj, h5f, *args, **kwargs) return func(obj, fobj, *args, **kwargs) return decorated_func
def with_write_hdf5(func)
Decorate an HDF5-writing function to open a filepath if needed ``func`` should be written to take the object to be written as the first argument, and then presume an `h5py.Group` as the second. This method uses keywords ``append`` and ``overwrite`` as follows if the output file already exists: - ``append=False, overwrite=False``: raise `~exceptions.IOError` - ``append=True``: open in mode ``a`` - ``append=False, overwrite=True``: open in mode ``w``
2.119471
2.147678
0.986866
# force deletion of existing dataset
if path in parent and overwrite:
    del parent[path]

# create the new dataset, improving the error message if the
# name already exists
try:
    return parent.create_dataset(path, **kwargs)
except RuntimeError as exc:
    if str(exc) == 'Unable to create link (Name already exists)':
        exc.args = ('{0}: {1!r}, pass overwrite=True '
                    'to ignore existing datasets'.format(str(exc), path),)
    raise
def create_dataset(parent, path, overwrite=False, **kwargs)
Create a new dataset inside the parent HDF5 object Parameters ---------- parent : `h5py.Group`, `h5py.File` the object in which to create a new dataset path : `str` the path at which to create the new dataset overwrite : `bool` if `True`, delete any existing dataset at the desired path, default: `False` **kwargs other arguments are passed directly to :meth:`h5py.Group.create_dataset` Returns ------- dataset : `h5py.Dataset` the newly created dataset
5.313466
5.477108
0.970123
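A short usage sketch against a hypothetical file, showing the overwrite behaviour described above:

import h5py

with h5py.File('example.h5', 'w') as h5f:  # hypothetical path
    create_dataset(h5f, 'psd', data=[1., 2., 3.])
    # a second call without overwrite=True raises RuntimeError;
    # with it, the existing dataset is deleted first
    create_dataset(h5f, 'psd', data=[4., 5., 6.], overwrite=True)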
# parse selection for SQL query if selection is None: return '' selections = [] for col, op_, value in parse_column_filters(selection): if engine and engine.name == 'postgresql': col = '"%s"' % col try: opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0] except KeyError: raise ValueError("Cannot format database 'WHERE' command with " "selection operator %r" % op_) selections.append('{0} {1} {2!r}'.format(col, opstr, value)) if selections: return 'WHERE %s' % ' AND '.join(selections) return ''
def format_db_selection(selection, engine=None)
Format a column filter selection as a SQL database WHERE string
3.993
3.709401
1.076454
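A sketch of the expected output, assuming parse_column_filters accepts 'snr > 5' style strings:

print(format_db_selection('snr > 5'))  # WHERE snr > 5.0
print(format_db_selection(None))       # '' (no filtering)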
import pandas as pd # parse columns for SQL query if columns is None: columnstr = '*' else: columnstr = ', '.join('"%s"' % c for c in columns) # parse selection for SQL query selectionstr = format_db_selection(selection, engine=engine) # build SQL query qstr = 'SELECT %s FROM %s %s' % (columnstr, tablename, selectionstr) # perform query tab = pd.read_sql(qstr, engine, **kwargs) # Convert unicode columns to string types = tab.apply(lambda x: pd.api.types.infer_dtype(x.values)) if not tab.empty: for col in types[types == 'unicode'].index: tab[col] = tab[col].astype(str) return Table.from_pandas(tab).filled()
def fetch(engine, tablename, columns=None, selection=None, **kwargs)
Fetch data from an SQL table into an `EventTable`

Parameters
----------
engine : `sqlalchemy.engine.Engine`
    the database engine to use when connecting

tablename : `str`
    the name of the table from which to fetch triggers

columns : `list` of `str`, optional
    the list of column names to fetch, default: all columns

selection : `str`, optional
    column filters to apply to the query, formatted into a SQL
    ``WHERE`` clause via `format_db_selection`

**kwargs
    other keyword arguments are passed to :func:`pandas.read_sql`

.. note::

   For now it will attempt to automatically connect you
   to a specific DB. In the future, this may be an input
   argument.

Returns
-------
table : `GravitySpyTable`
3.125695
3.432441
0.910633