code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
from ligo.lw.lsctables import use_in

class _ContentHandler(parent):  # pylint: disable=too-few-public-methods
    def __init__(self, document):
        super(_ContentHandler, self).__init__(document, filter_func)

return use_in(_ContentHandler)
def build_content_handler(parent, filter_func)
Build a `~xml.sax.handler.ContentHandler` with a given filter
7.30243
8.146453
0.896394
from ligo.lw.ligolw import Document from ligo.lw import types from ligo.lw.lsctables import use_in from ligo.lw.utils import (load_url, ligolw_add) # mock ToPyType to link to numpy dtypes topytype = types.ToPyType.copy() for key in types.ToPyType: if key in types.ToNumPyType: types.ToPyType[key] = numpy.dtype(types.ToNumPyType[key]).type contenthandler = use_in(contenthandler) # read one or more files into a single Document source = file_list(source) try: if len(source) == 1: return load_url( source[0], contenthandler=contenthandler, **kwargs ) return ligolw_add.ligolw_add( Document(), source, contenthandler=contenthandler, **kwargs ) except LigolwElementError as exc: # failed to read with ligo.lw, # try again with glue.ligolw (ilwdchar_compat) if LIGO_LW_COMPAT_ERROR.search(str(exc)): try: return read_ligolw( source, contenthandler=contenthandler, ilwdchar_compat=True, **kwargs ) except Exception: # if fails for any reason, use original error pass raise finally: # replace ToPyType types.ToPyType = topytype
def read_ligolw(source, contenthandler=LIGOLWContentHandler, **kwargs)
Read one or more LIGO_LW format files Parameters ---------- source : `str`, `file` the open file or file path to read contenthandler : `~xml.sax.handler.ContentHandler`, optional content handler used to parse document verbose : `bool`, optional be verbose when reading files, default: `False` Returns ------- xmldoc : :class:`~ligo.lw.ligolw.Document` the document object as parsed from the file(s)
5.24396
5.085704
1.031118
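A minimal usage sketch for ``read_ligolw``; the file name is illustrative and the call assumes python-ligo-lw is installed:
>>> from gwpy.io.ligolw import read_ligolw
>>> xmldoc = read_ligolw('H1-LDAS_STRAIN-968654552-10.xml.gz', verbose=True)  # returns a parsed Document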
def decorator(func_): # pylint: disable=missing-docstring @wraps(func_) def decorated_func(source, *args, **kwargs): # pylint: disable=missing-docstring from ligo.lw.ligolw import Document from glue.ligolw.ligolw import Document as GlueDocument if not isinstance(source, (Document, GlueDocument)): read_kw = { 'contenthandler': kwargs.pop('contenthandler', contenthandler), 'verbose': kwargs.pop('verbose', False), } return func_(read_ligolw(source, **read_kw), *args, **kwargs) return func_(source, *args, **kwargs) return decorated_func if func is not None: return decorator(func) return decorator
def with_read_ligolw(func=None, contenthandler=None)
Decorate a LIGO_LW-reading function to open a filepath if needed ``func`` should be written to presume a :class:`~ligo.lw.ligolw.Document` as the first positional argument
2.900886
2.825028
1.026852
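A short sketch of the decorator in use; ``count_tables`` is a hypothetical function written for illustration, not part of GWpy, and the file name is illustrative:
>>> from gwpy.io.ligolw import with_read_ligolw, list_tables
>>> @with_read_ligolw
... def count_tables(xmldoc):
...     # by the time the body runs, xmldoc is a parsed Document
...     return len(list_tables(xmldoc))
>>> count_tables('H1-LDAS_STRAIN-968654552-10.xml.gz')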
from ligo.lw.ligolw import Document from ligo.lw import (table, lsctables) # get ilwdchar_compat to pass to read_ligolw() if Document.__module__.startswith("glue"): kwargs["ilwdchar_compat"] = True # get content handler to read only this table (if given) if tablename is not None: tableclass = lsctables.TableByName[ table.Table.TableName(tablename) ] if contenthandler is None: contenthandler = get_partial_contenthandler(tableclass) # overwrite loading column names to get just what was asked for _oldcols = tableclass.loadcolumns if columns is not None: tableclass.loadcolumns = columns # read document if isinstance(source, Document): xmldoc = source else: if contenthandler is None: contenthandler = lsctables.use_in(LIGOLWContentHandler) try: xmldoc = read_ligolw(source, contenthandler=contenthandler, **kwargs) finally: # reinstate original set of loading column names if tablename is not None: tableclass.loadcolumns = _oldcols # now find the right table if tablename is None: tables = list_tables(xmldoc) if not tables: raise ValueError("No tables found in LIGO_LW document(s)") if len(tables) > 1: tlist = "'{}'".format("', '".join(tables)) raise ValueError("Multiple tables found in LIGO_LW document(s), " "please specify the table to read via the " "``tablename=`` keyword argument. The following " "tables were found: {}".format(tlist)) tableclass = lsctables.TableByName[table.Table.TableName(tables[0])] # extract table return tableclass.get_table(xmldoc)
def read_table(source, tablename=None, columns=None, contenthandler=None, **kwargs)
Read a :class:`~ligo.lw.table.Table` from one or more LIGO_LW files Parameters ---------- source : `Document`, `file`, `str`, `CacheEntry`, `list` object representing one or more files. One of - a LIGO_LW :class:`~ligo.lw.ligolw.Document` - an open `file` - a `str` pointing to a file path on disk - a formatted :class:`~lal.utils.CacheEntry` representing one file - a `list` of `str` file paths or :class:`~lal.utils.CacheEntry` tablename : `str` name of the table to read. columns : `list`, optional list of column name strings to read, default all. contenthandler : `~xml.sax.handler.ContentHandler`, optional SAX content handler for parsing LIGO_LW documents. **kwargs other keyword arguments are passed to `~gwpy.io.ligolw.read_ligolw` Returns ------- table : :class:`~ligo.lw.table.Table` `Table` of data
4.760512
4.410399
1.079383
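A hedged example of reading a single table; the file name is illustrative and the column names assume the standard ``sngl_burst`` schema:
>>> from gwpy.io.ligolw import read_table
>>> events = read_table('H1-LDAS_STRAIN-968654552-10.xml.gz',
...                     tablename='sngl_burst', columns=['peak_time', 'snr'])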
from ligo.lw.ligolw import (Document, LIGOLWContentHandler) from ligo.lw.lsctables import use_in from ligo.lw.utils import (load_filename, load_fileobj) use_in(kwargs.setdefault('contenthandler', LIGOLWContentHandler)) try: # try and load existing file if isinstance(fobj, string_types): return load_filename(fobj, **kwargs) if isinstance(fobj, FILE_LIKE): return load_fileobj(fobj, **kwargs)[0] except (OSError, IOError): # or just create a new Document return Document() except LigolwElementError as exc: if LIGO_LW_COMPAT_ERROR.search(str(exc)): try: return open_xmldoc(fobj, ilwdchar_compat=True, **kwargs) except Exception: # for any reason, raise original pass raise
def open_xmldoc(fobj, **kwargs)
Try and open an existing LIGO_LW-format file, or create a new Document Parameters ---------- fobj : `str`, `file` file path or open file object to read **kwargs other keyword arguments to pass to :func:`~ligo.lw.utils.load_filename`, or :func:`~ligo.lw.utils.load_fileobj` as appropriate Returns -------- xmldoc : :class:`~ligo.lw.ligolw.Document` either the `Document` as parsed from an existing file, or a new, empty `Document`
6.873064
5.32017
1.291888
from ligo.lw.ligolw import LIGO_LW try: from glue.ligolw.ligolw import LIGO_LW as LIGO_LW2 except ImportError: ligolw_types = (LIGO_LW,) else: ligolw_types = (LIGO_LW, LIGO_LW2) if isinstance(xmldoc, ligolw_types): return xmldoc else: for node in xmldoc.childNodes: if isinstance(node, ligolw_types): return node raise ValueError("Cannot find LIGO_LW element in XML Document")
def get_ligolw_element(xmldoc)
Find an existing <LIGO_LW> element in this XML Document
3.041781
2.534585
1.20011
from ligo.lw.ligolw import LIGO_LW from ligo.lw import lsctables # find or create LIGO_LW tag try: llw = get_ligolw_element(xmldoc) except ValueError: llw = LIGO_LW() xmldoc.appendChild(llw) for table in tables: try: # append new data to existing table old = lsctables.TableByName[ table.TableName(table.Name)].get_table(xmldoc) except ValueError: # or create a new table llw.appendChild(table) else: if overwrite: llw.removeChild(old) old.unlink() llw.appendChild(table) else: old.extend(table) return xmldoc
def write_tables_to_document(xmldoc, tables, overwrite=False)
Write the given LIGO_LW table into a :class:`Document` Parameters ---------- xmldoc : :class:`~ligo.lw.ligolw.Document` the document to write into tables : `list` of :class:`~ligo.lw.table.Table` the set of tables to write overwrite : `bool`, optional, default: `False` if `True`, delete an existing instance of the table type, otherwise append new rows
4.896252
4.035317
1.21335
from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler) from ligo.lw import utils as ligolw_utils # allow writing directly to XML if isinstance(target, (Document, LIGO_LW)): xmldoc = target # open existing document, if possible elif append: xmldoc = open_xmldoc( target, contenthandler=kwargs.pop('contenthandler', LIGOLWContentHandler)) # fail on existing document and not overwriting elif (not overwrite and isinstance(target, string_types) and os.path.isfile(target)): raise IOError("File exists: {}".format(target)) else: # or create a new document xmldoc = Document() # convert table to format write_tables_to_document(xmldoc, tables, overwrite=overwrite) # write file if isinstance(target, string_types): kwargs.setdefault('gz', target.endswith('.gz')) ligolw_utils.write_filename(xmldoc, target, **kwargs) elif isinstance(target, FILE_LIKE): kwargs.setdefault('gz', target.name.endswith('.gz')) ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
def write_tables(target, tables, append=False, overwrite=False, **kwargs)
Write an LIGO_LW table to file Parameters ---------- target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into tables : `list`, `tuple` of :class:`~ligo.lw.table.Table` the tables to write append : `bool`, optional, default: `False` if `True`, append to an existing file/table, otherwise `overwrite` overwrite : `bool`, optional, default: `False` if `True`, delete an existing instance of the table type, otherwise append new rows **kwargs other keyword arguments to pass to :func:`~ligo.lw.utils.load_filename`, or :func:`~ligo.lw.utils.load_fileobj` as appropriate
4.369182
3.855838
1.133134
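A minimal sketch of writing a (here empty) table to disk; ``ligo.lw.lsctables.New`` and the output file name are assumptions outside this snippet:
>>> from ligo.lw.lsctables import New, SnglBurstTable
>>> from gwpy.io.ligolw import write_tables
>>> tab = New(SnglBurstTable, columns=['peak_time', 'peak_time_ns', 'snr'])
>>> write_tables('new-events.xml.gz', [tab], overwrite=True)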
# pylint: disable=line-too-long # noqa: E501 try: from ligo.lw.ligolw import (Document, Stream) except ImportError: # no python-ligo-lw from glue.ligolw.ligolw import Document, Stream # read file object if isinstance(source, Document): xmldoc = source else: filt = get_filtering_contenthandler(Stream) xmldoc = read_ligolw(source, contenthandler=filt) # get list of table names tables = [] for tbl in xmldoc.childNodes[0].childNodes: try: tables.append(tbl.TableName(tbl.Name)) except AttributeError: # not a table continue return tables
def list_tables(source)
List the names of all tables in one or more LIGO_LW files Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one or more open files, file paths, or LIGO_LW `Document`s Examples -------- >>> from gwpy.io.ligolw import list_tables >>> print(list_tables('H1-LDAS_STRAIN-968654552-10.xml.gz')) ['process', 'process_params', 'sngl_burst', 'search_summary', 'segment_definer', 'segment_summary', 'segment']
7.195781
6.210495
1.158649
from ligo.lw.types import ( ToNumPyType as numpytypes, ToPyType as pytypes, ) # if nothing to do... if val is None or colname not in cls.validcolumns: return val llwtype = cls.validcolumns[colname] # don't mess with formatted IlwdChar if llwtype == 'ilwd:char': return _to_ilwd(val, cls.tableName, colname, ilwdchar_compat=_is_glue_ligolw_object(cls)) # otherwise map to numpy or python types try: return numpy.typeDict[numpytypes[llwtype]](val) except KeyError: return pytypes[llwtype](val)
def to_table_type(val, cls, colname)
Cast a value to the correct type for inclusion in a LIGO_LW table This method returns the input unmodified if a type mapping for ``colname`` isn't found. Parameters ---------- val : `object` The input object to convert, of any type cls : `type`, subclass of :class:`~ligo.lw.table.Table` the table class to map against colname : `str` The name of the mapping column Returns ------- obj : `object` The input ``val`` cast to the correct type Examples -------- >>> from gwpy.io.ligolw import to_table_type as to_ligolw_type >>> from ligo.lw.lsctables import SnglBurstTable >>> print(to_ligolw_type(1.0, SnglBurstTable, 'central_freq')) 1.0 ID integers are converted to fancy ILWD objects >>> print(to_ligolw_type(1, SnglBurstTable, 'process_id')) sngl_burst:process_id:1 Formatted fancy ILWD objects are left untouched: >>> from ligo.lw.ilwd import ilwdchar >>> pid = ilwdchar('process:process_id:0') >>> print(to_ligolw_type(pid, SnglBurstTable, 'process_id')) process:process_id:0
10.940473
7.859859
1.391943
# pylint: disable=unused-argument if fileobj is not None: loc = fileobj.tell() fileobj.seek(0) try: line1 = fileobj.readline().lower() line2 = fileobj.readline().lower() try: return (line1.startswith(XML_SIGNATURE) and line2.startswith((LIGOLW_SIGNATURE, LIGOLW_ELEMENT))) except TypeError: # bytes vs str return (line1.startswith(XML_SIGNATURE.decode('utf-8')) and line2.startswith((LIGOLW_SIGNATURE.decode('utf-8'), LIGOLW_ELEMENT.decode('utf-8')))) finally: fileobj.seek(loc) try: from ligo.lw.ligolw import Element except ImportError: return False try: from glue.ligolw.ligolw import Element as GlueElement except ImportError: element_types = (Element,) else: element_types = (Element, GlueElement) return len(args) > 0 and isinstance(args[0], element_types)
def is_ligolw(origin, filepath, fileobj, *args, **kwargs)
Identify a file object as LIGO_LW-format XML
2.766265
2.684192
1.030576
# pylint: disable=unused-argument if fileobj is not None: loc = fileobj.tell() fileobj.seek(0) try: sig = fileobj.read(5).lower() return sig == XML_SIGNATURE finally: fileobj.seek(loc) elif filepath is not None: return filepath.endswith(('.xml', '.xml.gz'))
def is_xml(origin, filepath, fileobj, *args, **kwargs)
Identify a file object as XML (any format)
3.0001
2.942163
1.019692
# read data
kwargs.setdefault('array_type', TimeSeries)
series = read_hdf5_array(h5f, path=path, **kwargs)
# crop if needed
if start is not None or end is not None:
    return series.crop(start, end)
return series
def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs)
Read a `TimeSeries` from HDF5
3.468158
3.816686
0.908683
# find group from which to read if group: h5g = h5f[group] else: h5g = h5f # find list of names to read if names is None: names = [key for key in h5g if _is_timeseries_dataset(h5g[key])] # read names out = kwargs.pop('dict_type', TimeSeriesDict)() kwargs.setdefault('array_type', out.EntryClass) for name in names: out[name] = read_hdf5_timeseries(h5g[name], **kwargs) return out
def read_hdf5_dict(h5f, names=None, group=None, **kwargs)
Read a `TimeSeriesDict` from HDF5
3.747495
3.474671
1.078518
# create group if needed if group and group not in h5f: h5g = h5f.create_group(group) elif group: h5g = h5f[group] else: h5g = h5f # write each timeseries kwargs.setdefault('format', 'hdf5') for key, series in tsdict.items(): series.write(h5g, path=str(key), **kwargs)
def write_hdf5_dict(tsdict, h5f, group=None, **kwargs)
Write a `TimeSeriesBaseDict` to HDF5 Each series in the dict is written as a dataset in the group
2.682446
2.710033
0.98982
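This function typically sits behind the registered ``TimeSeriesDict.write`` method; a small sketch with arbitrary channel and file names:
>>> from gwpy.timeseries import TimeSeries, TimeSeriesDict
>>> tsd = TimeSeriesDict()
>>> tsd['X1:TEST-CHANNEL'] = TimeSeries([1, 2, 3, 4, 5], sample_rate=1)
>>> tsd.write('data.h5', format='hdf5')  # each entry becomes one dataset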
from pycbc.psd import welch as pycbc_welch # default to 'standard' welch kwargs.setdefault('avg_method', 'mean') # get scheme if scheme is None: scheme = null_context() # generate pycbc FrequencySeries with scheme: pycbc_fseries = pycbc_welch(timeseries.to_pycbc(copy=False), seg_len=segmentlength, seg_stride=segmentlength-noverlap, **kwargs) # return GWpy FrequencySeries fseries = FrequencySeries.from_pycbc(pycbc_fseries, copy=False) fseries.name = timeseries.name fseries.override_unit(scale_timeseries_unit( timeseries.unit, scaling='density')) return fseries
def welch(timeseries, segmentlength, noverlap=None, scheme=None, **kwargs)
Calculate a PSD using Welch's method with a mean average Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. scheme : `pycbc.scheme.Scheme`, optional processing scheme in which to execute FFT, default: `None` **kwargs other keyword arguments to pass to :func:`pycbc.psd.welch` Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- pycbc.psd.welch
5.212617
5.113805
1.019323
idx = int((n//10 % 10 != 1) * (n % 10 < 4) * n % 10)
return '{}{}'.format(n, "tsnrhtdd"[idx::4])
def _ordinal(n)
Returns the ordinal string for a given integer See https://stackoverflow.com/a/20007730/1307974 Parameters ---------- n : `int` the number to convert to ordinal Examples -------- >>> _ordinal(11) '11th' >>> _ordinal(102) '102nd'
4.774713
6.588858
0.724665
if isinstance(operand, string_types): if operand == 'mean': operand = self.mean(axis=0) elif operand == 'median': operand = self.median(axis=0) else: raise ValueError("operand %r unrecognised, please give a " "Quantity or one of: 'mean', 'median'" % operand) out = self / operand return out
def ratio(self, operand)
Calculate the ratio of this `Spectrogram` against a reference Parameters ---------- operand : `str`, `FrequencySeries`, `Quantity` a `~gwpy.frequencyseries.FrequencySeries` or `~astropy.units.Quantity` to weight against, or one of - ``'mean'`` : weight against the mean of each spectrum in this Spectrogram - ``'median'`` : weight against the median of each spectrum in this Spectrogram Returns ------- spectrogram : `Spectrogram` a new `Spectrogram` Raises ------ ValueError if ``operand`` is given as a `str` that isn't supported
3.825184
3.853513
0.992649
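A quick sketch on synthetic data (random values, arbitrary metadata):
>>> import numpy
>>> from gwpy.spectrogram import Spectrogram
>>> specgram = Spectrogram(numpy.random.rand(10, 100), dt=1, f0=0, df=1)
>>> norm = specgram.ratio('median')  # each spectrum divided by the time-wise median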
if 'imshow' in kwargs: warnings.warn('the imshow keyword for Spectrogram.plot was ' 'removed, please pass method=\'imshow\' instead', DeprecationWarning) kwargs.setdefault('method', 'imshow' if kwargs.pop('imshow') else 'pcolormesh') kwargs.update(figsize=figsize, xscale=xscale) return super(Spectrogram, self).plot(**kwargs)
def plot(self, figsize=(12, 6), xscale='auto-gps', **kwargs)
Plot the data for this `Spectrogram` Parameters ---------- **kwargs all keyword arguments are passed along to underlying functions, see below for references Returns ------- plot : `~gwpy.plot.Plot` the `Plot` containing the data See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.Axes.imshow or gwpy.plot.Axes.pcolormesh for documentation of keyword arguments used in rendering the `Spectrogram` data
4.721219
4.851197
0.973207
data = numpy.vstack([s.value for s in spectra]) spec1 = list(spectra)[0] if not all(s.f0 == spec1.f0 for s in spectra): raise ValueError("Cannot stack spectra with different f0") if not all(s.df == spec1.df for s in spectra): raise ValueError("Cannot stack spectra with different df") kwargs.setdefault('name', spec1.name) kwargs.setdefault('channel', spec1.channel) kwargs.setdefault('epoch', spec1.epoch) kwargs.setdefault('f0', spec1.f0) kwargs.setdefault('df', spec1.df) kwargs.setdefault('unit', spec1.unit) if not ('dt' in kwargs or 'times' in kwargs): try: kwargs.setdefault('dt', spectra[1].epoch.gps - spec1.epoch.gps) except (AttributeError, IndexError): raise ValueError("Cannot determine dt (time-spacing) for " "Spectrogram from inputs") return Spectrogram(data, **kwargs)
def from_spectra(cls, *spectra, **kwargs)
Build a new `Spectrogram` from a list of spectra. Parameters ---------- *spectra any number of `~gwpy.frequencyseries.FrequencySeries` series dt : `float`, `~astropy.units.Quantity`, optional stride between given spectra Returns ------- Spectrogram a new `Spectrogram` from a vertical stacking of the spectra The new object takes the metadata from the first given `~gwpy.frequencyseries.FrequencySeries` if not given explicitly Notes ----- Each `~gwpy.frequencyseries.FrequencySeries` passed to this constructor must be the same length.
2.856874
2.724212
1.048698
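A hedged example stacking a few synthetic spectra (random values, 1 Hz resolution, arbitrary epochs):
>>> import numpy
>>> from gwpy.frequencyseries import FrequencySeries
>>> from gwpy.spectrogram import Spectrogram
>>> spectra = [FrequencySeries(numpy.random.rand(100), f0=0, df=1, epoch=i)
...            for i in range(4)]
>>> specgram = Spectrogram.from_spectra(*spectra, dt=1)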
out = scipy.percentile(self.value, percentile, axis=0) if self.name is not None: name = '{}: {} percentile'.format(self.name, _ordinal(percentile)) else: name = None return FrequencySeries(out, epoch=self.epoch, channel=self.channel, name=name, f0=self.f0, df=self.df, frequencies=(hasattr(self, '_frequencies') and self.frequencies or None))
def percentile(self, percentile)
Calculate a given spectral percentile for this `Spectrogram`. Parameters ---------- percentile : `float` percentile (0 - 100) of the bins to compute Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the given percentile `FrequencySeries` calculated from this `Spectrogram`
4.953671
4.642341
1.067063
from ..frequencyseries import SpectralVariance
return SpectralVariance.from_spectrogram(
    self, bins=bins, low=low, high=high, nbins=nbins, log=log,
    norm=norm, density=density)
def variance(self, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False)
Calculate the `SpectralVariance` of this `Spectrogram`. Parameters ---------- bins : `~numpy.ndarray`, optional, default `None` array of histogram bin edges, including the rightmost edge low : `float`, optional, default: `None` left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional, default: `None` right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional, default: `500` number of bins to generate, only read if ``bins`` is not given log : `bool`, optional, default: `False` calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given norm : `bool`, optional, default: `False` normalise bin counts to a unit sum density : `bool`, optional, default: `False` normalise bin counts to a unit integral Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights
3.748318
3.73703
1.003021
if low is not None: low = units.Quantity(low, self._default_yunit) if high is not None: high = units.Quantity(high, self._default_yunit) # check low frequency if low is not None and low == self.f0: low = None elif low is not None and low < self.f0: warnings.warn('Spectrogram.crop_frequencies given low frequency ' 'cutoff below f0 of the input Spectrogram. Low ' 'frequency crop will have no effect.') # check high frequency if high is not None and high.value == self.band[1]: high = None elif high is not None and high.value > self.band[1]: warnings.warn('Spectrogram.crop_frequencies given high frequency ' 'cutoff above cutoff of the input Spectrogram. High ' 'frequency crop will have no effect.') # find low index if low is None: idx0 = None else: idx0 = int(float(low.value - self.f0.value) // self.df.value) # find high index if high is None: idx1 = None else: idx1 = int(float(high.value - self.f0.value) // self.df.value) # crop if copy: return self[:, idx0:idx1].copy() return self[:, idx0:idx1]
def crop_frequencies(self, low=None, high=None, copy=False)
Crop this `Spectrogram` to the specified frequencies Parameters ---------- low : `float` lower frequency bound for cropped `Spectrogram` high : `float` upper frequency bound for cropped `Spectrogram` copy : `bool` if `False` return a view of the original data, otherwise create a fresh memory copy Returns ------- spec : `Spectrogram` A new `Spectrogram` with a subset of data from the frequency axis
2.283895
2.415763
0.945414
for ax in axes: for aset in ('collections', 'images'): try: return getattr(ax, aset)[-1] except (AttributeError, IndexError): continue raise ValueError("Cannot determine mappable layer on any axes " "for this colorbar")
def find_mappable(*axes)
Find the most recently added mappable layer in the given axes Parameters ---------- *axes : `~matplotlib.axes.Axes` one or more axes to search for a mappable
6.467548
6.67251
0.969283
@wraps(identifier) def decorated_func(origin, filepath, fileobj, *args, **kwargs): # pylint: disable=missing-docstring try: filepath = file_list(filepath)[0] except ValueError: if filepath is None: try: files = file_list(args[0]) except (IndexError, ValueError): pass else: if files: filepath = files[0] except IndexError: pass return identifier(origin, filepath, fileobj, *args, **kwargs) return decorated_func
def identify_with_list(identifier)
Decorate an I/O identifier to handle a list of files as input This function tries to resolve a single file path as a `str` from any file-like or collection-of-file-likes to pass to the underlying identifier for comparison.
3.333165
2.969064
1.122632
ctx = None if isinstance(source, FILE_LIKE): fileobj = source filepath = source.name if hasattr(source, 'name') else None else: filepath = source try: ctx = get_readable_fileobj(filepath, encoding='binary') fileobj = ctx.__enter__() # pylint: disable=no-member except IOError: raise except Exception: # pylint: disable=broad-except fileobj = None try: return get_format('read', cls, filepath, fileobj, args, kwargs) finally: if ctx is not None: ctx.__exit__(*sys.exc_info())
def get_read_format(cls, source, args, kwargs)
Determine the read format for a given input source
3.157178
3.137054
1.006415
fsamp, arr = wavfile.read(fobj, **kwargs)
return TimeSeries(arr, sample_rate=fsamp)
def read(fobj, **kwargs)
Read a WAV file into a `TimeSeries` Parameters ---------- fobj : `file`, `str` open file-like object or filename to read from **kwargs all keyword arguments are passed onto :func:`scipy.io.wavfile.read` See also -------- scipy.io.wavfile.read for details on how the WAV file is actually read Examples -------- >>> from gwpy.timeseries import TimeSeries >>> t = TimeSeries.read('test.wav')
6.504201
11.444007
0.56835
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
    scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data)
def write(series, output, scale=None)
Write a `TimeSeries` to a WAV file Parameters ---------- series : `TimeSeries` the series to write output : `file`, `str` the file object or filename to write to scale : `float`, optional the factor to apply to scale the data to (-1.0, 1.0), pass `scale=1` to not apply any scale, otherwise the data will be auto-scaled See also -------- scipy.io.wavfile.write for details on how the WAV file is actually written Examples -------- >>> from gwpy.timeseries import TimeSeries >>> t = TimeSeries([1, 2, 3, 4, 5]) >>> t.write('test.wav')
4.174592
4.801814
0.869378
# pylint: disable=unused-argument if origin == 'read' and fileobj is not None: loc = fileobj.tell() fileobj.seek(0) try: riff, _, fmt = struct.unpack('<4sI4s', fileobj.read(12)) if isinstance(riff, bytes): riff = riff.decode('utf-8') fmt = fmt.decode('utf-8') return riff == WAV_SIGNATURE[0] and fmt == WAV_SIGNATURE[1] except (UnicodeDecodeError, struct.error): return False finally: fileobj.seek(loc) elif filepath is not None: return filepath.endswith(('.wav', '.wave')) else: try: wave.open(args[0]) except (wave.Error, AttributeError): return False else: return True
def is_wav(origin, filepath, fileobj, *args, **kwargs)
Identify a file as WAV See `astropy.io.registry` for details on how this function is used.
2.454288
2.639162
0.92995
# remove any surrounding quotes
value = QUOTE_REGEX.sub('', value)
try:  # attempt `float()` conversion
    return float(value)
except ValueError:  # just return the input
    return value
def _float_or_str(value)
Internal method to attempt `float(value)` handling a `ValueError`
7.352807
5.956891
1.234336
# noqa # parse definition into parts (skipping null tokens) parts = list(generate_tokens(StringIO(definition.strip()).readline)) while parts[-1][0] in (token.ENDMARKER, token.NEWLINE): parts = parts[:-1] # parse simple definition: e.g: snr > 5 if len(parts) == 3: a, b, c = parts # pylint: disable=invalid-name if a[0] in [token.NAME, token.STRING]: # string comparison name = QUOTE_REGEX.sub('', a[1]) oprtr = OPERATORS[b[1]] value = _float_or_str(c[1]) return [(name, oprtr, value)] elif b[0] in [token.NAME, token.STRING]: name = QUOTE_REGEX.sub('', b[1]) oprtr = OPERATORS_INV[b[1]] value = _float_or_str(a[1]) return [(name, oprtr, value)] # parse between definition: e.g: 5 < snr < 10 elif len(parts) == 5: a, b, c, d, e = list(zip(*parts))[1] # pylint: disable=invalid-name name = QUOTE_REGEX.sub('', c) return [(name, OPERATORS_INV[b], _float_or_str(a)), (name, OPERATORS[d], _float_or_str(e))] raise ValueError("Cannot parse filter definition from %r" % definition)
def parse_column_filter(definition)
Parse a `str` of the form 'column>50' Parameters ---------- definition : `str` a column filter definition of the form ``<name><operator><threshold>`` or ``<threshold><operator><name><operator><threshold>``, e.g. ``frequency >= 10``, or ``50 < snr < 100`` Returns ------- filters : `list` of `tuple` a `list` of filter 3-`tuple`s, where each `tuple` contains the following elements: - ``column`` (`str`) - the name of the column on which to operate - ``operator`` (`callable`) - the operator to call when evaluating the filter - ``operand`` (`anything`) - the argument to the operator function Raises ------ ValueError if the filter definition cannot be parsed KeyError if any parsed operator string cannot be mapped to a function from the `operator` module Notes ----- Strings that contain non-alphanumeric characters (e.g. hyphen `-`) should be quoted inside the filter definition, to prevent such characters being interpreted as operators, e.g. ``channel = X1:TEST`` should always be passed as ``channel = "X1:TEST"``. Examples -------- >>> parse_column_filter("frequency>10") [('frequency', <function operator.gt>, 10.)] >>> parse_column_filter("50 < snr < 100") [('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)] >>> parse_column_filter('channel = "H1:TEST"') [('channel', <function operator.eq>, 'H1:TEST')]
2.868086
3.119291
0.919467
# noqa: E501 fltrs = [] for def_ in _flatten(definitions): if is_filter_tuple(def_): fltrs.append(def_) else: for splitdef in DELIM_REGEX.split(def_)[::2]: fltrs.extend(parse_column_filter(splitdef)) return fltrs
def parse_column_filters(*definitions)
Parse multiple compound column filter definitions Examples -------- >>> parse_column_filters('snr > 10', 'frequency < 1000') [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)] >>> parse_column_filters('snr > 10 && frequency < 1000') [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
4.312211
4.624378
0.932495
if isinstance(container, string_types): container = [container] for elem in container: if isinstance(elem, string_types) or is_filter_tuple(elem): yield elem else: for elem2 in _flatten(elem): yield elem2
def _flatten(container)
Flatten arbitrary nested list of filters into a 1-D list
2.873858
2.52672
1.137387
return isinstance(tup, (tuple, list)) and (
    len(tup) == 3 and
    isinstance(tup[0], string_types) and
    callable(tup[1]))
def is_filter_tuple(tup)
Return whether a `tuple` matches the format for a column filter
2.961611
3.029035
0.977741
keep = numpy.ones(len(table), dtype=bool)
for name, op_func, operand in parse_column_filters(*column_filters):
    col = table[name].view(numpy.ndarray)
    keep &= op_func(col, operand)
return table[keep]
def filter_table(table, *column_filters)
Apply one or more column slice filters to a `Table` Multiple column filters can be given, and will be applied concurrently Parameters ---------- table : `~astropy.table.Table` the table to filter column_filter : `str`, `tuple` a column slice filter definition, in one of two formats: - `str` - e.g. ``'snr > 10'`` - `tuple` - ``(<column>, <operator>, <operand>)``, e.g. ``('snr', operator.gt, 10)`` multiple filters can be given and will be applied in order Returns ------- table : `~astropy.table.Table` a view of the input table with only those rows matching the filters Examples -------- >>> filter_table(my_table, 'snr>10', 'frequency<1000') custom operations can be defined using filter tuple definitions: >>> from gwpy.table.filters import in_segmentlist >>> filter_table(my_table, ('time', in_segmentlist, segs))
3.658295
4.834524
0.756702
dataset = io_hdf5.find_dataset(source, path=path) attrs = dict(dataset.attrs) # unpickle channel object try: attrs['channel'] = _unpickle_channel(attrs['channel']) except KeyError: # no channel stored pass # unpack byte strings for python3 for key in attrs: if isinstance(attrs[key], bytes): attrs[key] = attrs[key].decode('utf-8') return array_type(dataset[()], **attrs)
def read_hdf5_array(source, path=None, array_type=Array)
Read an `Array` from the given HDF5 object Parameters ---------- source : `str`, :class:`h5py.HLObject` path to HDF file on disk, or open `h5py.HLObject`. path : `str` path in HDF hierarchy of dataset. array_type : `type` desired return type
3.787569
4.581798
0.826656
try: return pickle.loads(raw) except (ValueError, pickle.UnpicklingError, EOFError, TypeError, IndexError) as exc: # maybe not pickled if isinstance(raw, bytes): raw = raw.decode('utf-8') try: # test if this is a valid channel name Channel.MATCH.match(raw) except ValueError: raise exc return raw
def _unpickle_channel(raw)
Try and unpickle a channel with sensible error handling
4.565801
4.348591
1.049949
if (value is None or (isinstance(value, Index) and value.regular)): raise IgnoredAttribute # map type to something HDF5 can handle for typekey, func in ATTR_TYPE_MAP.items(): if issubclass(type(value), typekey): return func(value) return value
def _format_metadata_attribute(value)
Format a value for writing to HDF5 as a `h5py.Dataset` attribute
8.77141
7.493371
1.170556
for attr in ('unit',) + array._metadata_slots: # format attribute try: value = _format_metadata_attribute( getattr(array, '_%s' % attr, None)) except IgnoredAttribute: continue # store attribute try: dataset.attrs[attr] = value except (TypeError, ValueError, RuntimeError) as exc: exc.args = ("Failed to store {} ({}) for {}: {}".format( attr, type(value).__name__, type(array).__name__, str(exc))) raise
def write_array_metadata(dataset, array)
Write metadata for ``array`` into the `h5py.Dataset`
4.695037
4.528395
1.0368
if path is None: path = array.name if path is None: raise ValueError("Cannot determine HDF5 path for %s, " "please set ``name`` attribute, or pass ``path=`` " "keyword when writing" % type(array).__name__) # create dataset dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite, data=array.value, compression=compression, **kwargs) # write default metadata write_array_metadata(dset, array) # allow caller to specify their own metadata dict if attrs: for key in attrs: dset.attrs[key] = attrs[key] return dset
def write_hdf5_array(array, h5g, path=None, attrs=None, append=False, overwrite=False, compression='gzip', **kwargs)
Write the ``array`` to an `h5py.Dataset` Parameters ---------- array : `gwpy.types.Array` the data object to write h5g : `str`, `h5py.Group` a file path to write to, or an `h5py.Group` in which to create a new dataset path : `str`, optional the path inside the group at which to create the new dataset, defaults to ``array.name`` attrs : `dict`, optional extra metadata to write into `h5py.Dataset.attrs`, on top of the default metadata append : `bool`, default: `False` if `True`, write new dataset to existing file, otherwise an exception will be raised if the output file exists (only used if ``f`` is `str`) overwrite : `bool`, default: `False` if `True`, overwrite an existing dataset in an existing file, otherwise an exception will be raised if a dataset exists with the given name (only used if ``f`` is `str`) compression : `str`, `int`, optional compression option to pass to :meth:`h5py.Group.create_dataset` **kwargs other keyword arguments for :meth:`h5py.Group.create_dataset` Returns ------- datasets : `h5py.Dataset` the newly created dataset
3.872767
4.369248
0.886369
attrs = {} # loop through named axes for i, axis in zip(range(series.ndim), ('x', 'y')): # find property names unit = '{}unit'.format(axis) origin = '{}0'.format(axis) delta = 'd{}'.format(axis) # store attributes aunit = getattr(series, unit) attrs.update({ unit: str(aunit), origin: getattr(series, origin).to(aunit).value, delta: getattr(series, delta).to(aunit).value, }) return attrs
def format_index_array_attrs(series)
Format metadata attributes for an indexed array This function is used to provide the necessary metadata to meet the (proposed) LIGO Common Data Format specification for series data in HDF5.
4.269448
4.565547
0.935145
if attrs is None:
    attrs = format_index_array_attrs(series)
return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs)
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs)
Write a Series to HDF5. See :func:`write_hdf5_array` for details of arguments and keywords.
4.071559
3.77552
1.07841
def from_hdf5(*args, **kwargs): kwargs.setdefault('array_type', array_type) return read_hdf5_array(*args, **kwargs) io_registry.register_reader(format, array_type, from_hdf5) if issubclass(array_type, Series): io_registry.register_writer(format, array_type, write_hdf5_series) else: io_registry.register_writer(format, array_type, write_hdf5_array) if identify: io_registry.register_identifier(format, array_type, io_hdf5.identify_hdf5)
def register_hdf5_array_io(array_type, format='hdf5', identify=True)
Register read() and write() methods for the HDF5 format
2.312174
2.426806
0.952764
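Once registration has run, the HDF5 writer is reached through the standard ``write`` method; a small sketch with an arbitrary file name:
>>> from gwpy.timeseries import TimeSeries
>>> ts = TimeSeries([1, 2, 3, 4, 5], sample_rate=1, name='demo')
>>> ts.write('demo.h5', format='hdf5')  # dataset path defaults to the series name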
if colors: return itertools.cycle(colors) try: return itertools.cycle(p["color"] for p in rcParams["axes.prop_cycle"]) except KeyError: # matplotlib < 1.5 return itertools.cycle(rcParams["axes.color_cycle"])
def color_cycle(colors=None)
An infinite iterator of the given (or default) colors
3.307276
3.315237
0.997599
verbose = kwargs.pop('verbose', False) # parse input as a list of files try: # try and map to a list of file-like objects files = file_list(source) except ValueError: # otherwise treat as single file files = [source] path = None # to pass to get_read_format() else: path = files[0] if files else None # determine input format (so we don't have to do it multiple times) if kwargs.get('format', None) is None: kwargs['format'] = get_read_format(cls, path, (source,) + args, kwargs) # calculate maximum number of processes nproc = min(kwargs.pop('nproc', 1), len(files)) # define multiprocessing method def _read_single_file(fobj): try: return fobj, io_read(cls, fobj, *args, **kwargs) # pylint: disable=broad-except,redefine-in-handler except Exception as exc: if nproc == 1: raise if isinstance(exc, SAXException): # SAXExceptions don't pickle return fobj, exc.getException() # pylint: disable=no-member return fobj, exc # format verbosity if verbose is True: verbose = 'Reading ({})'.format(kwargs['format']) # read files output = mp_utils.multiprocess_with_queues( nproc, _read_single_file, files, verbose=verbose, unit='files') # raise exceptions (from multiprocessing, single process raises inline) for fobj, exc in output: if isinstance(exc, Exception): exc.args = ('Failed to read %s: %s' % (fobj, str(exc)),) raise exc # return combined object _, out = zip(*output) return flatten(out)
def read_multi(flatten, cls, source, *args, **kwargs)
Read sources into a `cls` with multiprocessing This method should be called by `cls.read` and uses the `nproc` keyword to enable and handle pool-based multiprocessing of multiple source files, using `flatten` to combine the chunked data into a single object of the correct type. Parameters ---------- flatten : `callable` a method to take a list of ``cls`` instances, and combine them into a single ``cls`` instance cls : `type` the object type to read source : `str`, `list` of `str`, ... the input data source, which can take many different forms *args positional arguments to pass to the reader **kwargs keyword arguments to pass to the reader
4.734307
4.864501
0.973236
# read from filename if isinstance(fobj, string_types): with open(fobj, 'r') as fobj2: return read_json_flag(fobj2) # read from open file txt = fobj.read() if isinstance(txt, bytes): txt = txt.decode('utf-8') data = json.loads(txt) # format flag name = '{ifo}:{name}:{version}'.format(**data) out = DataQualityFlag(name, active=data['active'], known=data['known']) # parse 'metadata' try: out.description = data['metadata'].get('flag_description', None) except KeyError: # no metadata available, but that's ok pass else: out.isgood = not data['metadata'].get( 'active_indicates_ifo_badness', False) return out
def read_json_flag(fobj)
Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
5.170742
4.842279
1.067832
# write to filename if isinstance(fobj, string_types): with open(fobj, 'w') as fobj2: return write_json_flag(flag, fobj2, **kwargs) # build json packet data = {} data['ifo'] = flag.ifo data['name'] = flag.tag data['version'] = flag.version data['active'] = flag.active data['known'] = flag.known data['metadata'] = {} data['metadata']['active_indicates_ifo_badness'] = not flag.isgood data['metadata']['flag_description'] = flag.description # write json.dump(data, fobj, **kwargs)
def write_json_flag(flag, fobj, **kwargs)
Write a `DataQualityFlag` to a JSON file Parameters ---------- flag : `DataQualityFlag` data to write fobj : `str`, `file` target file (or filename) to write **kwargs other keyword arguments to pass to :func:`json.dump` See also -------- json.dump for details on acceptable keyword arguments
4.000914
4.410175
0.907201
segmentlist = type(segmentlist)(segmentlist).coalesce() idx = column.argsort() contains = numpy.zeros(column.shape[0], dtype=bool) j = 0 try: segstart, segend = segmentlist[j] except IndexError: # no segments, return all False return contains i = 0 while i < contains.shape[0]: # extract time for this index x = idx[i] # <- index in original column time = column[x] # if before start, move to next value if time < segstart: i += 1 continue # if after end, find the next segment and check value again if time >= segend: j += 1 try: segstart, segend = segmentlist[j] continue except IndexError: break # otherwise value must be in this segment contains[x] = True i += 1 return contains
def in_segmentlist(column, segmentlist)
Return a boolean array indicating which values lie inside the given segmentlist A `~gwpy.segments.Segment` represents a semi-open interval, so for any segment `[a, b)`, a value `x` is 'in' the segment if a <= x < b
4.096488
4.294812
0.953822
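A self-contained sketch with made-up times and segments:
>>> import numpy
>>> from gwpy.segments import Segment, SegmentList
>>> from gwpy.table.filters import in_segmentlist
>>> times = numpy.array([1., 5., 12.])
>>> segs = SegmentList([Segment(0, 4), Segment(10, 20)])
>>> in_segmentlist(times, segs)  # -> array([ True, False,  True])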
if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except: return None # try and sneak past a decorator try: obj = obj.im_func.func_closure[0].cell_contents except (AttributeError, TypeError): pass try: fn = inspect.getsourcefile(obj) except: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except: fn = None if not fn: return None try: source, lineno = inspect.findsource(obj) except: lineno = None if lineno: linespec = "#L%d" % (lineno + 1) else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(gwpy.__file__)) if fn.startswith(os.path.pardir): return None return ("http://github.com/gwpy/gwpy/tree/%s/gwpy/%s%s" % (GWPY_VERSION['full-revisionid'], fn, linespec))
def linkcode_resolve(domain, info)
Determine the URL corresponding to Python object This code is stolen with thanks from the scipy team.
2.143233
2.078006
1.03139
from ..frequencyseries import FrequencySeries if nfft is None: nfft = self.size dft = npfft.rfft(self.value, n=nfft) / nfft dft[1:] *= 2.0 new = FrequencySeries(dft, epoch=self.epoch, unit=self.unit, name=self.name, channel=self.channel) try: new.frequencies = npfft.rfftfreq(nfft, d=self.dx.value) except AttributeError: new.frequencies = numpy.arange(new.size) / (nfft * self.dx.value) return new
def fft(self, nfft=None)
Compute the one-dimensional discrete Fourier transform of this `TimeSeries`. Parameters ---------- nfft : `int`, optional length of the desired Fourier transform, input will be cropped or padded to match the desired length. If nfft is not given, the length of the `TimeSeries` will be used Returns ------- out : `~gwpy.frequencyseries.FrequencySeries` the normalised, complex-valued FFT `FrequencySeries`. See Also -------- :mod:`scipy.fftpack` for the definition of the DFT and conventions used. Notes ----- This method, in constrast to the :func:`numpy.fft.rfft` method it calls, applies the necessary normalisation such that the amplitude of the output `~gwpy.frequencyseries.FrequencySeries` is correct.
3.426427
3.341255
1.025491
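A hedged example on a synthetic sinusoid; the peak of the output recovers the injected 100 Hz:
>>> import numpy
>>> from gwpy.timeseries import TimeSeries
>>> t = numpy.arange(0, 1, 1./1024)
>>> ts = TimeSeries(numpy.sin(2 * numpy.pi * 100 * t), sample_rate=1024)
>>> spec = ts.fft()
>>> peak = spec.frequencies[numpy.abs(spec.value).argmax()]  # ~100 Hz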
from gwpy.spectrogram import Spectrogram # format lengths if fftlength is None: fftlength = self.duration if isinstance(fftlength, units.Quantity): fftlength = fftlength.value nfft = int((fftlength * self.sample_rate).decompose().value) noverlap = int((overlap * self.sample_rate).decompose().value) navg = divmod(self.size-noverlap, (nfft-noverlap))[0] # format window if window is None: window = 'boxcar' if isinstance(window, (str, tuple)): win = signal.get_window(window, nfft) else: win = numpy.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') elif win.shape[0] != nfft: raise ValueError('Window is the wrong size.') win = win.astype(self.dtype) scaling = 1. / numpy.absolute(win).mean() if nfft % 2: nfreqs = (nfft + 1) // 2 else: nfreqs = nfft // 2 + 1 ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=numpy.complex), channel=self.channel, epoch=self.epoch, f0=0, df=1 / fftlength, dt=1, copy=True) # stride through TimeSeries, recording FFTs as columns of Spectrogram idx = 0 for i in range(navg): # find step TimeSeries idx_end = idx + nfft if idx_end > self.size: continue stepseries = self[idx:idx_end].detrend() * win # calculated FFT, weight, and stack fft_ = stepseries.fft(nfft=nfft) * scaling ffts.value[i, :] = fft_.value idx += (nfft - noverlap) mean = ffts.mean(0) mean.name = self.name mean.epoch = self.epoch mean.channel = self.channel return mean
def average_fft(self, fftlength=None, overlap=0, window=None)
Compute the averaged one-dimensional DFT of this `TimeSeries`. This method computes a number of FFTs of duration ``fftlength`` and ``overlap`` (both given in seconds), and returns the mean average. This method is analogous to the Welch average method for power spectra. Parameters ---------- fftlength : `float` number of seconds in single FFT, default, use whole `TimeSeries` overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : complex-valued `~gwpy.frequencyseries.FrequencySeries` the transformed output, with populated frequencies array metadata See Also -------- :mod:`scipy.fftpack` for the definition of the DFT and conventions used.
3.472322
3.531277
0.983305
# get method
method_func = spectral.get_method(method)
# calculate PSD using UI method
return spectral.psd(self, method_func, fftlength=fftlength,
                    overlap=overlap, window=window, **kwargs)
def psd(self, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, **kwargs)
Calculate the PSD `FrequencySeries` for this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details **kwargs other keyword arguments are passed to the underlying PSD-generation method Returns ------- psd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the PSD. Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
5.320234
7.745251
0.686903
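A small sketch on synthetic white noise; the ``method`` names are those listed in the Notes above:
>>> import numpy
>>> from gwpy.timeseries import TimeSeries
>>> ts = TimeSeries(numpy.random.normal(size=4096), sample_rate=1024)
>>> psd = ts.psd(fftlength=1, overlap=0.5, method='median')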
return self.psd(method=method, fftlength=fftlength, overlap=overlap,
                window=window, **kwargs) ** (1/2.)
def asd(self, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, **kwargs)
Calculate the ASD `FrequencySeries` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details Returns ------- asd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the ASD. See also -------- TimeSeries.psd Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
3.88923
5.445631
0.714193
return spectral.psd(
    (self, other),
    spectral.csd,
    fftlength=fftlength,
    overlap=overlap,
    window=window,
    **kwargs
)
def csd(self, other, fftlength=None, overlap=None, window='hann', **kwargs)
Calculate the CSD `FrequencySeries` for two `TimeSeries` Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- csd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the CSD.
4.266375
7.109475
0.600097
# get method
method_func = spectral.get_method(method)
# calculate PSD using UI method
return spectral.average_spectrogram(
    self,
    method_func,
    stride,
    fftlength=fftlength,
    overlap=overlap,
    window=window,
    **kwargs
)
def spectrogram(self, stride, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, nproc=1, **kwargs)
Calculate the average power spectrogram of this `TimeSeries` using the specified average spectrum method. Each time-bin of the output `Spectrogram` is calculated by taking a chunk of the `TimeSeries` in the segment `[t - overlap/2., t + stride + overlap/2.)` and calculating the :meth:`~gwpy.timeseries.TimeSeries.psd` of those data. As a result, each time-bin is calculated using `stride + overlap` seconds of data. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details nproc : `int` number of CPUs to use in parallel processing of FFTs Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency power spectrogram as generated from the input time-series. Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
5.591817
6.966973
0.802618
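A short sketch on the same kind of synthetic input, taking the square root to get an ASD spectrogram:
>>> import numpy
>>> from gwpy.timeseries import TimeSeries
>>> ts = TimeSeries(numpy.random.normal(size=4096), sample_rate=1024)
>>> specgram = ts.spectrogram(1, fftlength=0.5, overlap=0.25) ** (1/2.)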
# set kwargs for periodogram() kwargs.setdefault('fs', self.sample_rate.to('Hz').value) # run return spectral.spectrogram(self, signal.periodogram, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
def spectrogram2(self, fftlength, overlap=None, window='hann', **kwargs)
Calculate the non-averaged power `Spectrogram` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats scaling : [ 'density' | 'spectrum' ], optional selects between computing the power spectral density ('density') where the `Spectrogram` has units of V**2/Hz if the input is measured in V and computing the power spectrum ('spectrum') where the `Spectrogram` has units of V**2 if the input is measured in V. Defaults to 'density'. **kwargs other parameters to be passed to `scipy.signal.periodogram` for each column of the `Spectrogram` Returns ------- spectrogram: `~gwpy.spectrogram.Spectrogram` a power `Spectrogram` with `1/fftlength` frequency resolution and (fftlength - overlap) time resolution. See also -------- scipy.signal.periodogram for documentation on the Fourier methods used in this calculation Notes ----- This method calculates overlapping periodograms for all possible chunks of data entirely containing within the span of the input `TimeSeries`, then normalises the power in overlapping chunks using a triangular window centred on that chunk which most overlaps the given `Spectrogram` time sample.
5.276002
5.891247
0.895566
from ..spectrogram import Spectrogram try: from scipy.signal import spectrogram except ImportError: raise ImportError("Must have scipy>=0.16 to utilize " "this method.") # format lengths if isinstance(fftlength, units.Quantity): fftlength = fftlength.value nfft = int((fftlength * self.sample_rate).decompose().value) if not overlap: # use scipy.signal.spectrogram noverlap default noverlap = nfft // 8 else: noverlap = int((overlap * self.sample_rate).decompose().value) # generate output spectrogram [frequencies, times, sxx] = spectrogram(self, fs=self.sample_rate.value, window=window, nperseg=nfft, noverlap=noverlap, mode='complex', **kwargs) return Spectrogram(sxx.T, name=self.name, unit=self.unit, xindex=self.t0.value + times, yindex=frequencies)
def fftgram(self, fftlength, overlap=None, window='hann', **kwargs)
Calculate the Fourier-gram of this `TimeSeries`. At every ``stride``, a single, complex FFT is calculated. Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- a Fourier-gram
3.805285
4.083977
0.93176
specgram = self.spectrogram(stride, fftlength=fftlength, overlap=overlap, method=method, window=window, nproc=nproc) ** (1/2.) if filter: specgram = specgram.filter(*filter) return specgram.variance(bins=bins, low=low, high=high, nbins=nbins, log=log, norm=norm, density=density)
def spectral_variance(self, stride, fftlength=None, overlap=None, method=DEFAULT_FFT_METHOD, window='hann', nproc=1, filter=None, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False)
Calculate the `SpectralVariance` of this `TimeSeries`. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT method : `str`, optional FFT-averaging method, see *Notes* for more details overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. bins : `numpy.ndarray`, optional, default `None` array of histogram bin edges, including the rightmost edge low : `float`, optional left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional number of bins to generate, only read if ``bins`` is not given log : `bool`, optional calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given norm : `bool`, optional normalise bin counts to a unit sum density : `bool`, optional normalise bin counts to a unit integral Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
2.257056
3.09524
0.729202
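The histogram step behind ``spectral_variance`` can be sketched with plain numpy: given an amplitude spectrogram (time along the first axis), count amplitude occurrences per frequency bin. The array shape and bin edges below are illustrative assumptions.

import numpy

ntime, nfreq = 200, 129                        # assumed spectrogram shape
sxx = numpy.abs(numpy.random.normal(size=(ntime, nfreq)))

bins = numpy.logspace(-3, 1, 51)               # 50 logarithmic amplitude bins
counts = numpy.empty((nfreq, bins.size - 1))
for i in range(nfreq):
    # histogram the time samples of each frequency bin separately
    counts[i], _ = numpy.histogram(sxx[:, i], bins=bins)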
return spectral.psd( self, spectral.rayleigh, fftlength=fftlength, overlap=overlap, )
def rayleigh_spectrum(self, fftlength=None, overlap=None)
Calculate the Rayleigh `FrequencySeries` for this `TimeSeries`. The Rayleigh statistic is calculated as the ratio of the standard deviation and the mean of a number of periodograms. Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to that of the relevant method. Returns ------- psd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the PSD.
5.305702
9.572266
0.554279
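The Rayleigh statistic is the ratio of the standard deviation to the mean over a stack of periodograms; for stationary Gaussian noise it hovers around 1. A scipy-only sketch, with an assumed sample rate and segment length:

import numpy
from scipy.signal import periodogram

fs = 512.0                                     # assumed sample rate (Hz)
data = numpy.random.normal(size=int(32 * fs))

nper = int(4 * fs)                             # 4-second segments
segments = data[:data.size // nper * nper].reshape(-1, nper)
freqs, psds = periodogram(segments, fs=fs, window='hann', axis=-1)
rayleigh = psds.std(axis=0) / psds.mean(axis=0)   # one value per frequency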
specgram = spectral.average_spectrogram( self, spectral.rayleigh, stride, fftlength=fftlength, overlap=overlap, nproc=nproc, **kwargs ) specgram.override_unit('') return specgram
def rayleigh_spectrogram(self, stride, fftlength=None, overlap=0, nproc=1, **kwargs)
Calculate the Rayleigh statistic spectrogram of this `TimeSeries` Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, default: ``0`` nproc : `int`, optional maximum number of independent frame reading processes, default: ``1`` Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency Rayleigh spectrogram as generated from the input time-series. See Also -------- TimeSeries.rayleigh_spectrum for details of the statistic calculation
4.461141
6.643525
0.671502
return spectral.average_spectrogram( (self, other), spectral.csd, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc, **kwargs )
def csd_spectrogram(self, other, stride, fftlength=None, overlap=0, window='hann', nproc=1, **kwargs)
Calculate the cross spectral density spectrogram of this `TimeSeries` with 'other'. Parameters ---------- other : `~gwpy.timeseries.TimeSeries` second time-series for cross spectral density calculation stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency cross spectrogram as generated from the two input time-series.
3.155043
4.817698
0.654886
# design filter filt = filter_design.highpass(frequency, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
def highpass(self, frequency, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs)
Filter this `TimeSeries` with a high-pass filter. Parameters ---------- frequency : `float` high-pass corner frequency gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `float` stop-band edge frequency, defaults to `frequency * 1.5` type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.highpass` Returns ------- hpseries : `TimeSeries` a high-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.highpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable.
3.629555
5.488215
0.661336
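gwpy's high-pass design lives in `gwpy.signal.filter_design.highpass`; as a rough scipy-only analogue (not the same design), one can build a Butterworth high-pass in second-order sections and apply it forwards and backwards, mirroring ``filtfilt=True``. The corner frequency and order below are illustrative.

import numpy
from scipy import signal

fs = 4096.0                                    # assumed sample rate (Hz)
data = numpy.random.normal(size=int(8 * fs))

# 8th-order Butterworth high-pass at 30 Hz, in stable second-order sections
sos = signal.butter(8, 30.0, btype='highpass', fs=fs, output='sos')
filtered = signal.sosfiltfilt(sos, data)       # zero-phase application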
# design filter filt = filter_design.bandpass(flow, fhigh, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
def bandpass(self, flow, fhigh, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs)
Filter this `TimeSeries` with a band-pass filter. Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.bandpass` Returns ------- bpseries : `TimeSeries` a band-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.bandpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable.
3.458731
4.963628
0.696815
if n is None and ftype == 'iir': n = 8 elif n is None: n = 60 if isinstance(rate, units.Quantity): rate = rate.value factor = (self.sample_rate.value / rate) # NOTE: use math.isclose when python >= 3.5 if numpy.isclose(factor, 1., rtol=1e-09, atol=0.): warnings.warn( "resample() rate matches current sample_rate ({}), returning " "input data unmodified; please double-check your " "parameters".format(self.sample_rate), UserWarning, ) return self # if integer down-sampling, use decimate if factor.is_integer(): if ftype == 'iir': filt = signal.cheby1(n, 0.05, 0.8/factor, output='zpk') else: filt = signal.firwin(n+1, 1./factor, window=window) return self.filter(filt, filtfilt=True)[::int(factor)] # otherwise use Fourier filtering else: nsamp = int(self.shape[0] * self.dx.value * rate) new = signal.resample(self.value, nsamp, window=window).view(self.__class__) new.__metadata_finalize__(self) new._unit = self.unit new.sample_rate = rate return new
def resample(self, rate, window='hamming', ftype='fir', n=None)
Resample this Series to a new rate Parameters ---------- rate : `float` rate to which to resample this `Series` window : `str`, `numpy.ndarray`, optional window function to apply to signal in the Fourier domain, see :func:`scipy.signal.get_window` for details on acceptable formats, only used for `ftype='fir'` or irregular downsampling ftype : `str`, optional type of filter, either 'fir' or 'iir', defaults to 'fir' n : `int`, optional if `ftype='fir'` the number of taps in the filter, otherwise the order of the Chebyshev type I IIR filter Returns ------- Series a new Series with the resampling applied, and the same metadata
4.353776
4.450081
0.978359
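The branch logic above (FIR decimation for integer factors, Fourier resampling otherwise) can be sketched with scipy alone; the rates and filter length are illustrative assumptions.

import numpy
from scipy import signal

fs = 4096.0                                    # assumed input rate (Hz)
data = numpy.random.normal(size=int(4 * fs))

new_rate = 1024.0
factor = fs / new_rate
if factor.is_integer():
    # anti-alias FIR filter, zero-phase application, then decimate
    fir = signal.firwin(61, 1.0 / factor, window='hamming')
    out = signal.filtfilt(fir, [1.0], data)[::int(factor)]
else:
    # irregular factor: Fourier-domain resampling
    out = signal.resample(data, int(data.size * new_rate / fs),
                          window='hamming')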
return self.filter(zeros, poles, gain, analog=analog, **kwargs)
def zpk(self, zeros, poles, gain, analog=True, **kwargs)
Filter this `TimeSeries` by applying a zero-pole-gain filter Parameters ---------- zeros : `array-like` list of zero frequencies (in Hertz) poles : `array-like` list of pole frequencies (in Hertz) gain : `float` DC gain of filter analog : `bool`, optional type of ZPK being applied, if `analog=True` all parameters will be converted in the Z-domain for digital filtering Returns ------- timeseries : `TimeSeries` the filtered version of the input data See Also -------- TimeSeries.filter for details on how a digital ZPK-format filter is applied Examples -------- To apply a zpk filter with five zeros at 100 Hz and five poles at 1 Hz (giving an overall DC gain of 1e-10):: >>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
3.352439
11.510907
0.29124
from matplotlib import mlab from ..frequencyseries import FrequencySeries # check sampling rates if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'): sampling = min(self.sample_rate.value, other.sample_rate.value) # resample higher rate series if self.sample_rate.value == sampling: other = other.resample(sampling) self_ = self else: self_ = self.resample(sampling) else: sampling = self.sample_rate.value self_ = self # check fft lengths if overlap is None: overlap = 0 else: overlap = int((overlap * self_.sample_rate).decompose().value) if fftlength is None: fftlength = int(self_.size/2. + overlap/2.) else: fftlength = int((fftlength * self_.sample_rate).decompose().value) if window is not None: kwargs['window'] = signal.get_window(window, fftlength) coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength, Fs=sampling, noverlap=overlap, **kwargs) out = coh.view(FrequencySeries) out.xindex = freqs out.epoch = self.epoch out.name = 'Coherence between %s and %s' % (self.name, other.name) out.unit = 'coherence' return out
def coherence(self, other, fftlength=None, overlap=None, window='hann', **kwargs)
Calculate the frequency-coherence between this `TimeSeries` and another. Parameters ---------- other : `TimeSeries` `TimeSeries` signal to calculate coherence with fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superseded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- If `self` and `other` have different :attr:`TimeSeries.sample_rate` values, the higher sampled `TimeSeries` will be down-sampled to match the lower. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator
2.897224
2.764402
1.048047
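A scipy-only sketch of magnitude-squared coherence between two equally-sampled signals, using `scipy.signal.coherence` (Welch averaging) rather than the `matplotlib.mlab.cohere` call in the method above; the sample rate and FFT lengths are illustrative.

import numpy
from scipy import signal

fs = 256.0                                     # assumed common sample rate (Hz)
t = numpy.arange(0, 64, 1 / fs)
common = numpy.sin(2 * numpy.pi * 10 * t)      # shared 10 Hz component
x = common + 0.5 * numpy.random.normal(size=t.size)
y = common + 0.5 * numpy.random.normal(size=t.size)

freqs, coh = signal.coherence(x, y, fs=fs, window='hann',
                              nperseg=int(4 * fs), noverlap=int(2 * fs))
# coh peaks near 10 Hz where the two signals share power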
# shifting self backwards is the same as forwards dt = abs(dt) # crop inputs self_ = self.crop(self.span[0], self.span[1] - dt) other = self.crop(self.span[0] + dt, self.span[1]) return self_.coherence(other, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
def auto_coherence(self, dt, fftlength=None, overlap=None, window='hann', **kwargs)
Calculate the frequency-coherence between this `TimeSeries` and a time-shifted copy of itself. The standard :meth:`TimeSeries.coherence` is calculated between the input `TimeSeries` and a :meth:`cropped <TimeSeries.crop>` copy of itself. Since the cropped version will be shorter, the input series will be shortened to match. Parameters ---------- dt : `float` duration (in seconds) of time-shift fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superseded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- The :meth:`TimeSeries.auto_coherence` will perform best when ``dt`` is approximately ``fftlength / 2``. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator
4.461641
5.145082
0.867166
from ..spectrogram.coherence import from_timeseries return from_timeseries(self, other, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc)
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None, window='hann', nproc=1)
Calculate the coherence spectrogram between this `TimeSeries` and other. Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` number of parallel processes to use when calculating individual coherence spectra. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency coherence spectrogram as generated from the input time-series.
3.040777
3.79353
0.801569
stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through TimeSeries, recording RMS data = numpy.zeros(nsteps) for step in range(nsteps): # find step TimeSeries idx = int(stridesamp * step) idx_end = idx + stridesamp stepseries = self[idx:idx_end] rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value)**2)) data[step] = rms_ name = '%s %.2f-second RMS' % (self.name, stride) return self.__class__(data, channel=self.channel, t0=self.t0, name=name, sample_rate=(1/float(stride)))
def rms(self, stride=1)
Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride
4.647561
4.613097
1.007471
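The strided RMS above reduces to a reshape-and-average with plain numpy when the stride divides the data length evenly; the sample rate and stride below are illustrative.

import numpy

fs = 1024.0                                    # assumed sample rate (Hz)
data = numpy.random.normal(size=int(10 * fs))

stride = 1.0                                   # seconds per RMS sample
nsamp = int(stride * fs)
blocks = data[:data.size // nsamp * nsamp].reshape(-1, nsamp)
rms = numpy.sqrt(numpy.mean(numpy.abs(blocks) ** 2, axis=1))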
stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through the TimeSeries and mix with a local oscillator, # taking the average over each stride out = type(self)(numpy.zeros(nsteps, dtype=complex)) out.__array_finalize__(self) out.sample_rate = 1 / float(stride) w = 2 * numpy.pi * f * self.dt.decompose().value for step in range(nsteps): istart = int(stridesamp * step) iend = istart + stridesamp idx = numpy.arange(istart, iend) mixed = 2 * numpy.exp(-1j * w * idx) * self.value[idx] out.value[step] = mixed.mean() if exp: return out mag = out.abs() phase = type(mag)(numpy.angle(out, deg=deg)) phase.__array_finalize__(out) phase.override_unit('deg' if deg else 'rad') return (mag, phase)
def demodulate(self, f, stride=1, exp=False, deg=True)
Compute the average magnitude and phase of this `TimeSeries` once per stride at a given frequency. Parameters ---------- f : `float` frequency (Hz) at which to demodulate the signal stride : `float`, optional stride (seconds) between calculations, defaults to 1 second exp : `bool`, optional return the magnitude and phase trends as one `TimeSeries` object representing a complex exponential, default: False deg : `bool`, optional if `exp=False`, calculates the phase in degrees Returns ------- mag, phase : `TimeSeries` if `exp=False`, returns a pair of `TimeSeries` objects representing magnitude and phase trends with `dt=stride` out : `TimeSeries` if `exp=True`, returns a single `TimeSeries` with magnitude and phase trends represented as `mag * exp(1j*phase)` with `dt=stride` Examples -------- Demodulation is useful when trying to examine steady sinusoidal signals we know to be contained within data. For instance, we can download some data from LOSC to look at trends of the amplitude and phase of LIGO Livingston's calibration line at 331.3 Hz: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617) We can demodulate the `TimeSeries` at 331.3 Hz with a stride of one minute: >>> amp, phase = data.demodulate(331.3, stride=60) We can then plot these trends to visualize fluctuations in the amplitude of the calibration line: >>> from gwpy.plot import Plot >>> plot = Plot(amp) >>> ax = plot.gca() >>> ax.set_ylabel('Strain Amplitude at 331.3 Hz') >>> plot.show()
4.73429
4.574961
1.034826
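The core of the demodulation above is a mix-and-average: multiply the data by 2*exp(-2*pi*i*f*t) and take the mean over each stride, so the magnitude and phase of the averages track the line. A self-contained numpy sketch with an assumed 60 Hz line:

import numpy

fs = 1024.0                                    # assumed sample rate (Hz)
f_line = 60.0                                  # assumed line frequency (Hz)
t = numpy.arange(0, 20, 1 / fs)
data = 1e-3 * numpy.cos(2 * numpy.pi * f_line * t + 0.3)

stride = 1.0
nsamp = int(stride * fs)
mixed = 2 * numpy.exp(-2j * numpy.pi * f_line * t) * data
trend = mixed[:mixed.size // nsamp * nsamp].reshape(-1, nsamp).mean(axis=1)
mag = numpy.abs(trend)                         # ~1e-3, the line amplitude
phase = numpy.angle(trend, deg=True)           # ~0.3 rad expressed in degrees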
# check window properties if side not in ('left', 'right', 'leftright'): raise ValueError("side must be one of 'left', 'right', " "or 'leftright'") out = self.copy() # identify the second stationary point away from each boundary, # else default to half the TimeSeries width nleft, nright = 0, 0 mini, = signal.argrelmin(out.value) maxi, = signal.argrelmax(out.value) if 'left' in side: nleft = max(mini[0], maxi[0]) nleft = min(nleft, self.size/2) if 'right' in side: nright = out.size - min(mini[-1], maxi[-1]) nright = min(nright, self.size/2) out *= planck(out.size, nleft=nleft, nright=nright) return out
def taper(self, side='leftright')
Taper the ends of this `TimeSeries` smoothly to zero. Parameters ---------- side : `str`, optional the side of the `TimeSeries` to taper, must be one of `'left'`, `'right'`, or `'leftright'` Returns ------- out : `TimeSeries` a copy of `self` tapered at one or both ends Raises ------ ValueError if `side` is not one of `('left', 'right', 'leftright')` Examples -------- To see the effect of the Planck-taper window, we can taper a sinusoidal `TimeSeries` at both ends: >>> import numpy >>> from gwpy.timeseries import TimeSeries >>> t = numpy.linspace(0, 1, 2048) >>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t) >>> tapered = series.taper() We can plot it to see how the ends now vary smoothly from 0 to 1: >>> from gwpy.plot import Plot >>> plot = Plot(series, tapered, separate=True, sharex=True) >>> plot.show() Notes ----- The :meth:`TimeSeries.taper` automatically tapers from the second stationary point (local maximum or minimum) on the specified side of the input. However, the method will never taper more than half the full width of the `TimeSeries`, and will fail if there are no stationary points. See :func:`~gwpy.signal.window.planck` for the generic Planck taper window, and see :func:`scipy.signal.get_window` for other common window formats.
3.831065
3.244447
1.180807
# compute the ASD fftlength = fftlength if fftlength else _fft_length_default(self.dt) if asd is None: asd = self.asd(fftlength, overlap=overlap, method=method, window=window, **kwargs) asd = asd.interpolate(1./self.duration.decompose().value) # design whitening filter, with highpass if requested ncorner = int(highpass / asd.df.decompose().value) if highpass else 0 ntaps = int((fduration * self.sample_rate).decompose().value) tdw = filter_design.fir_from_transfer(1/asd.value, ntaps=ntaps, window=window, ncorner=ncorner) # condition the input data and apply the whitening filter in_ = self.copy().detrend(detrend) out = in_.convolve(tdw, window=window) return out * numpy.sqrt(2 * in_.dt.decompose().value)
def whiten(self, fftlength=None, overlap=0, method=DEFAULT_FFT_METHOD, window='hanning', detrend='constant', asd=None, fduration=2, highpass=None, **kwargs)
Whiten this `TimeSeries` using inverse spectrum truncation Parameters ---------- fftlength : `float`, optional FFT integration length (in seconds) for ASD estimation, default: choose based on sample rate overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 method : `str`, optional FFT-averaging method window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'constant'`` asd : `~gwpy.frequencyseries.FrequencySeries`, optional the amplitude spectral density using which to whiten the data, overrides other ASD arguments, default: `None` fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, must be no longer than `fftlength`, default: 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, default: `None` **kwargs other keyword arguments are passed to the `TimeSeries.asd` method to estimate the amplitude spectral density `FrequencySeries` of this `TimeSeries` Returns ------- out : `TimeSeries` a whitened version of the input data with zero mean and unit variance See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method gwpy.signal.filter_design.fir_from_transfer for FIR filter design through spectrum truncation Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms The ``window`` argument is used in ASD estimation, FIR filter design, and in preventing spectral leakage in the output. Due to filter settle-in, a segment of length ``0.5*fduration`` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input is detrended and the output normalised such that, if the input is stationary and Gaussian, then the output will have zero mean and unit variance. For more on inverse spectrum truncation, see arXiv:gr-qc/0509116.
5.261531
4.265552
1.233494
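``whiten`` builds a time-domain FIR from the inverse ASD; a cruder frequency-domain sketch (not the method above) divides the data's FFT by an interpolated ASD estimate and transforms back. The sample rate and FFT lengths are assumptions.

import numpy
from scipy import signal

fs = 2048.0                                    # assumed sample rate (Hz)
data = numpy.random.normal(size=int(8 * fs))

# ASD estimate via Welch, interpolated onto the full rFFT frequency grid
freqs, psd = signal.welch(data, fs=fs, window='hann', nperseg=int(2 * fs))
asd = numpy.sqrt(psd)

fft_freqs = numpy.fft.rfftfreq(data.size, d=1 / fs)
asd_full = numpy.interp(fft_freqs, freqs, asd)
asd_full[asd_full == 0] = numpy.inf            # avoid division by zero

white = numpy.fft.irfft(numpy.fft.rfft(data) / asd_full, n=data.size)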
try: from scipy.signal import find_peaks except ImportError as exc: exc.args = ("Must have scipy>=1.1.0 to utilize this method.",) raise # Find points to gate based on a threshold data = self.whiten(**whiten_kwargs) if whiten else self window_samples = cluster_window * data.sample_rate.value gates = find_peaks(abs(data.value), height=threshold, distance=window_samples)[0] out = self.copy() # Iterate over list of indices to gate and apply each one nzero = int(abs(tzero) * self.sample_rate.value) npad = int(abs(tpad) * self.sample_rate.value) half = nzero + npad ntotal = 2 * half for gate in gates: # Set the boundaries for windowed data in the original time series left_idx = max(0, gate - half) right_idx = min(gate + half, len(self.value) - 1) # Choose which part of the window will replace the data # This must be done explicitly for edge cases where a window # overlaps index 0 or the end of the time series left_idx_window = half - (gate - left_idx) right_idx_window = half + (right_idx - gate) window = 1 - planck(ntotal, nleft=npad, nright=npad) window = window[left_idx_window:right_idx_window] out[left_idx:right_idx] *= window return out
def gate(self, tzero=1.0, tpad=0.5, whiten=True, threshold=50., cluster_window=0.5, **whiten_kwargs)
Removes high-amplitude peaks from the data using an inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `float`, optional half-width time duration in which the time series is set to zero tpad : `float`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whiten both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show()
4.274475
4.359374
0.980525
pad = int(numpy.ceil(fir.size/2)) nfft = min(8*fir.size, self.size) # condition the input data in_ = self.copy() window = signal.get_window(window, fir.size) in_.value[:pad] *= window[:pad] in_.value[-pad:] *= window[-pad:] # if FFT length is long enough, perform only one convolution if nfft >= self.size/2: conv = signal.fftconvolve(in_.value, fir, mode='same') # else use the overlap-save algorithm else: nstep = nfft - 2*pad conv = numpy.zeros(self.size) # handle first chunk separately conv[:nfft-pad] = signal.fftconvolve(in_.value[:nfft], fir, mode='same')[:nfft-pad] # process chunks of length nstep k = nfft - pad while k < self.size - nfft + pad: yk = signal.fftconvolve(in_.value[k-pad:k+nstep+pad], fir, mode='same') conv[k:k+yk.size-2*pad] = yk[pad:-pad] k += nstep # handle last chunk separately conv[-nfft+pad:] = signal.fftconvolve(in_.value[-nfft:], fir, mode='same')[-nfft+pad:] out = type(self)(conv) out.__array_finalize__(self) return out
def convolve(self, fir, window='hanning')
Convolve this `TimeSeries` with an FIR filter using the overlap-save method Parameters ---------- fir : `numpy.ndarray` the time domain filter to convolve with window : `str`, optional window function to apply to boundaries, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : `TimeSeries` the result of the convolution See Also -------- scipy.signal.fftconvolve for details on the convolution scheme used here TimeSeries.filter for an alternative method designed for short filters Notes ----- The output `TimeSeries` is the same length and has the same timestamps as the input. Due to filter settle-in, a segment half the length of `fir` will be corrupted at the left and right boundaries. To prevent spectral leakage these segments will be windowed before convolving.
2.931277
2.803582
1.045547
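The overlap-save chunking above can be cross-checked against a one-shot FFT convolution: each chunk is processed with ``pad`` samples of context on either side and only the interior of the result is kept. The FIR filter, chunk size, and data length below are illustrative.

import numpy
from scipy import signal

data = numpy.random.normal(size=4096)
fir = signal.firwin(129, 0.2)                  # example low-pass FIR

pad = fir.size // 2                            # context needed per chunk edge
nstep = 512                                    # output samples per chunk

full = signal.fftconvolve(data, fir, mode='same')

out = numpy.zeros_like(data)
for k in range(pad, data.size - pad, nstep):
    stop = min(k + nstep, data.size - pad)
    chunk = data[k - pad:stop + pad]
    conv = signal.fftconvolve(chunk, fir, mode='same')
    out[k:stop] = conv[pad:pad + (stop - k)]

# interior samples agree with the one-shot convolution
assert numpy.allclose(out[pad:-pad], full[pad:-pad])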
self.is_compatible(mfilter) # condition data if whiten is True: fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt)) overlap = asd_kw.pop('overlap', None) if overlap is None: overlap = recommended_overlap(window) * fftlength asd = self.asd(fftlength, overlap, window=window, **asd_kw) # pad the matched-filter to prevent corruption npad = int(wduration * mfilter.sample_rate.decompose().value / 2) mfilter = mfilter.pad(npad) # whiten (with errors on division by zero) with numpy.errstate(all='raise'): in_ = self.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend) mfilter = mfilter.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend)[npad:-npad] else: in_ = self.detrend(detrend) mfilter = mfilter.detrend(detrend) # compute matched-filter SNR and normalise stdev = numpy.sqrt((mfilter.value**2).sum()) snr = in_.convolve(mfilter[::-1], window=window) / stdev snr.__array_finalize__(self) return snr
def correlate(self, mfilter, window='hanning', detrend='linear', whiten=False, wduration=2, highpass=None, **asd_kw)
Cross-correlate this `TimeSeries` with another signal Parameters ---------- mfilter : `TimeSeries` the time domain signal to correlate with window : `str`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'linear'`` whiten : `bool`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, default: `False` wduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten=True`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, only used if `whiten=True`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD, only used if `whiten=True` Returns ------- snr : `TimeSeries` the correlated signal-to-noise ratio (SNR) timeseries See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method Notes ----- The `window` argument is used in ASD estimation, whitening, and preventing spectral leakage in the output. It is not used to condition the matched-filter, which should be windowed before passing to this method. Due to filter settle-in, a segment half the length of `mfilter` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input and matched-filter will be detrended, and the output will be normalised so that the SNR measures number of standard deviations from the expected mean.
4.29108
3.835301
1.118838
data = signal.detrend(self.value, type=detrend).view(type(self)) data.__metadata_finalize__(self) data._unit = self.unit return data
def detrend(self, detrend='constant')
Remove the trend from this `TimeSeries` This method just wraps :func:`scipy.signal.detrend` to return an object of the same type as the input. Parameters ---------- detrend : `str`, optional the type of detrending, either ``'constant'`` to remove the mean, or ``'linear'`` to remove a least-squares straight-line fit, default: ``'constant'``. Returns ------- detrended : `TimeSeries` the detrended input series See Also -------- scipy.signal.detrend for details on the options for the `detrend` argument, and how the operation is done
7.325825
13.733705
0.533419
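In isolation, `scipy.signal.detrend` behaves as follows: ``'constant'`` removes the mean, ``'linear'`` removes a least-squares straight-line fit. A short illustration on synthetic data:

import numpy
from scipy import signal

t = numpy.linspace(0, 10, 1001)
data = 0.5 * t + 2.0 + numpy.random.normal(scale=0.1, size=t.size)

no_mean = signal.detrend(data, type='constant')   # mean removed, slope remains
no_trend = signal.detrend(data, type='linear')    # straight-line fit removed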
zpk = filter_design.notch(frequency, self.sample_rate.value, type=type, **kwargs) return self.filter(*zpk, filtfilt=filtfilt)
def notch(self, frequency, type='iir', filtfilt=True, **kwargs)
Notch out a frequency in this `TimeSeries`. Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch type : `str`, optional type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- notched : `TimeSeries` a notch-filtered copy of the input `TimeSeries` See Also -------- TimeSeries.filter for details on the filtering method scipy.signal.iirdesign for details on the IIR filter design method
4.593302
9.281566
0.494884
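gwpy's notch design comes from `gwpy.signal.filter_design.notch`; a rough scipy-only analogue (not the same design) uses `scipy.signal.iirnotch` with a zero-phase application. The line frequency and quality factor are illustrative.

import numpy
from scipy import signal

fs = 4096.0                                    # assumed sample rate (Hz)
t = numpy.arange(0, 4, 1 / fs)
data = numpy.sin(2 * numpy.pi * 60 * t) + numpy.random.normal(size=t.size)

b, a = signal.iirnotch(60.0, Q=30.0, fs=fs)    # second-order 60 Hz notch
notched = signal.filtfilt(b, a, data)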
qscan, _ = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange, frange=frange, **kwargs) qgram = qscan.table(snrthresh=snrthresh) return qgram
def q_gram(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, mismatch=qtransform.DEFAULT_MISMATCH, snrthresh=5.5, **kwargs)
Scan a `TimeSeries` using the multi-Q transform and return an `EventTable` of the most significant tiles Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan mismatch : `float`, optional maximum allowed fractional mismatch between neighbouring tiles snrthresh : `float`, optional lower inclusive threshold on individual tile SNR to keep in the table **kwargs other keyword arguments to be passed to :meth:`QTiling.transform`, including ``'epoch'`` and ``'search'`` Returns ------- qgram : `EventTable` a table of time-frequency tiles on the most significant `QPlane` See Also -------- TimeSeries.q_transform for a method to interpolate the raw Q-transform over a regularly gridded spectrogram gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented gwpy.table.EventTable.tile to render this `EventTable` as a collection of polygons Notes ----- Only tiles with signal energy greater than or equal to `snrthresh ** 2 / 2` will be stored in the output `EventTable`. The table columns are ``'time'``, ``'duration'``, ``'frequency'``, ``'bandwidth'``, and ``'energy'``.
4.054126
4.442862
0.912503
# delete current value if given None if value is None: return delattr(self, key) _key = "_{}".format(key) index = "{[0]}index".format(axis) unit = "{[0]}unit".format(axis) # convert float to Quantity if not isinstance(value, Quantity): try: value = Quantity(value, getattr(self, unit)) except TypeError: value = Quantity(float(value), getattr(self, unit)) # if value is changing, delete current index try: curr = getattr(self, _key) except AttributeError: delattr(self, index) else: if ( value is None or getattr(self, key) is None or not value.unit.is_equivalent(curr.unit) or value != curr ): delattr(self, index) # set new value setattr(self, _key, value) return value
def _update_index(self, axis, key, value)
Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x", "x0", 0) >>> self._update_index("x", "dx", 0) To actually set an index array, use `_set_index`
3.515366
3.885486
0.904743
axis = key[0] origin = "{}0".format(axis) delta = "d{}".format(axis) if index is None: return delattr(self, key) if not isinstance(index, Index): try: unit = index.unit except AttributeError: unit = getattr(self, "_default_{}unit".format(axis)) index = Index(index, unit=unit, copy=False) setattr(self, origin, index[0]) if index.regular: setattr(self, delta, index[1] - index[0]) else: delattr(self, delta) setattr(self, "_{}".format(key), index)
def _set_index(self, key, index)
Set a new index array for this series
3.375111
3.350677
1.007292
try: return self._x0 except AttributeError: self._x0 = Quantity(0, self.xunit) return self._x0
def x0(self)
X-axis coordinate of the first data point :type: `~astropy.units.Quantity` scalar
3.766933
3.359772
1.121187
try: return self._dx except AttributeError: try: self._xindex except AttributeError: self._dx = Quantity(1, self.xunit) else: if not self.xindex.regular: raise AttributeError("This series has an irregular x-axis " "index, so 'dx' is not well defined") self._dx = self.xindex[1] - self.xindex[0] return self._dx
def dx(self)
X-axis sample separation :type: `~astropy.units.Quantity` scalar
4.014585
3.729476
1.076447
try: return self._xindex except AttributeError: self._xindex = Index.define(self.x0, self.dx, self.shape[0]) return self._xindex
def xindex(self)
Positions of the data on the x-axis :type: `~astropy.units.Quantity` array
4.169078
4.798732
0.868787
try: return self._dx.unit except AttributeError: try: return self._x0.unit except AttributeError: return self._default_xunit
def xunit(self)
Unit of x-axis index :type: `~astropy.units.Unit`
5.320044
4.801516
1.107992
from ..plot import Plot from ..plot.text import default_unit_label # correct for log scales and zeros if kwargs.get('xscale') == 'log' and self.x0.value == 0: kwargs.setdefault('xlim', (self.dx.value, self.xspan[1])) # make plot plot = Plot(self, method=method, **kwargs) # set default y-axis label (xlabel is set by Plot()) default_unit_label(plot.gca().yaxis, self.unit) return plot
def plot(self, method='plot', **kwargs)
Plot the data for this series Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes matplotlib.axes.Axes.plot for documentation of keyword arguments used in rendering the data
6.377798
6.972721
0.914679
kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post')) data = self.append(self.value[-1:], inplace=False) return data.plot(**kwargs)
def step(self, **kwargs)
Create a step plot of this series
10.864863
8.668324
1.253398
self.x0 = self.x0 + Quantity(delta, self.xunit)
def shift(self, delta)
Shift this `Series` forward on the X-axis by ``delta`` This modifies the series in-place. Parameters ---------- delta : `float`, `~astropy.units.Quantity`, `str` The amount by which to shift (in x-axis units if `float`), give a negative value to shift backwards along the x-axis Examples -------- >>> from gwpy.types import Series >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m') >>> print(a.x0) 0.0 m >>> a.shift(5) >>> print(a.x0) 5.0 m >>> a.shift('-1 km') >>> print(a.x0) -995.0 m
9.859048
10.354621
0.95214
x = Quantity(x, self.xindex.unit).value try: idx = (self.xindex.value == x).nonzero()[0][0] except IndexError as e: e.args = ("Value %r not found in array index" % x,) raise return self[idx]
def value_at(self, x)
Return the value of this `Series` at the given `xindex` value Parameters ---------- x : `float`, `~astropy.units.Quantity` the `xindex` value at which to search Returns ------- y : `~astropy.units.Quantity` the value of this Series at the given `xindex` value
4.452055
4.744074
0.938446
out = super(Array, self).diff(n=n, axis=axis) try: out.x0 = self.x0 + self.dx * n except AttributeError: # irregular xindex out.x0 = self.xindex[n] return out
def diff(self, n=1, axis=-1)
Calculate the n-th order discrete difference along given axis. The first order difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher order differences are calculated by using `diff` recursively. Parameters ---------- n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : `Series` The `n` order differences. The shape of the output is the same as the input, except along `axis` where the dimension is smaller by `n`. See Also -------- numpy.diff for documentation on the underlying method
4.747847
7.633544
0.621971
self.is_compatible(other) if isinstance(other, type(self)): if abs(float(self.xspan[1] - other.xspan[0])) < tol: return 1 elif abs(float(other.xspan[1] - self.xspan[0])) < tol: return -1 return 0 elif type(other) in [list, tuple, numpy.ndarray]: return 1
def is_contiguous(self, other, tol=1/2.**18)
Check whether other is contiguous with self. Parameters ---------- other : `Series`, `numpy.ndarray` another series of the same type to test for contiguity tol : `float`, optional the numerical tolerance of the test Returns ------- 1 if `other` is contiguous with this series, i.e. would attach seamlessly onto the end -1 if `other` is anti-contiguous with this series, i.e. would attach seamlessly onto the start 0 if `other` is completely dis-contiguous with this series Notes ----- If a raw `numpy.ndarray` is passed as other, with no metadata, then the contiguity check will always pass
2.663657
3.120688
0.853548
if isinstance(other, type(self)): # check step size, if possible try: if not self.dx == other.dx: raise ValueError("%s sample sizes do not match: " "%s vs %s." % (type(self).__name__, self.dx, other.dx)) except AttributeError: raise ValueError("Series with irregular xindexes cannot " "be compatible") # check units if not self.unit == other.unit and not ( self.unit in [dimensionless_unscaled, None] and other.unit in [dimensionless_unscaled, None]): raise ValueError("%s units do not match: %s vs %s." % (type(self).__name__, str(self.unit), str(other.unit))) else: # assume an array-like object, and just check that the shape # and dtype match arr = numpy.asarray(other) if arr.ndim != self.ndim: raise ValueError("Dimensionality does not match") if arr.dtype != self.dtype: warn("Array data types do not match: %s vs %s" % (self.dtype, other.dtype)) return True
def is_compatible(self, other)
Check whether this series and other have compatible metadata This method tests that the `sample size <Series.dx>` and the `~Series.unit` match.
3.325328
3.052759
1.089286
out = other.append(self, inplace=False, gap=gap, pad=pad, resize=resize) if inplace: self.resize(out.shape, refcheck=False) self[:] = out[:] self.x0 = out.x0.copy() del out return self return out
def prepend(self, other, inplace=True, pad=None, gap=None, resize=True)
Connect another series onto the start of the current one. Parameters ---------- other : `Series` another series of the same type as this one inplace : `bool`, optional perform operation in-place, modifying current series, otherwise copy data and return new series, default: `True` .. warning:: `inplace` prepend bypasses the reference check in `numpy.ndarray.resize`, so be careful to only use this for arrays that haven't been sharing their memory! pad : `float`, optional value with which to pad discontiguous series, by default gaps will result in a `ValueError`. gap : `str`, optional action to perform if there's a gap between the other series and this one. One of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. resize : `bool`, optional resize this array to accommodate new data, otherwise shift the old data to the left (potentially falling off the start) and put the new data in at the end, default: `True`. Returns ------- series : `TimeSeries` time-series containing joined data sets
4.027052
5.091743
0.790899
return self.append(other, inplace=inplace, resize=False)
def update(self, other, inplace=True)
Update this series by appending new data from another series and dropping the same amount of data off the start. This is a convenience method that just calls `~Series.append` with `resize=False`.
9.144273
6.329958
1.444602
x0, x1 = self.xspan xtype = type(x0) if isinstance(start, Quantity): start = start.to(self.xunit).value if isinstance(end, Quantity): end = end.to(self.xunit).value # pin early starts to time-series start if start == x0: start = None elif start is not None and xtype(start) < x0: warn('%s.crop given start smaller than current start, ' 'crop will begin when the Series actually starts.' % type(self).__name__) start = None # pin late ends to time-series end if end == x1: end = None if end is not None and xtype(end) > x1: warn('%s.crop given end larger than current end, ' 'crop will end when the Series actually ends.' % type(self).__name__) end = None # find start index if start is None: idx0 = None else: idx0 = int((xtype(start) - x0) // self.dx.value) # find end index if end is None: idx1 = None else: idx1 = int((xtype(end) - x0) // self.dx.value) if idx1 >= self.size: idx1 = None # crop if copy: return self[idx0:idx1].copy() return self[idx0:idx1]
def crop(self, start=None, end=None, copy=False)
Crop this series to the given x-axis extent. Parameters ---------- start : `float`, optional lower limit of x-axis to crop to, defaults to current `~Series.x0` end : `float`, optional upper limit of x-axis to crop to, defaults to current series end copy : `bool`, optional, default: `False` copy the input data to fresh memory, otherwise return a view Returns ------- series : `Series` A new series with a sub-set of the input data Notes ----- If either ``start`` or ``end`` is outside of the original `Series` span, warnings will be printed and the limits will be restricted to the :attr:`~Series.xspan`
2.621276
2.653542
0.98784
# format arguments kwargs.setdefault('mode', 'constant') if isinstance(pad_width, int): pad_width = (pad_width,) # form pad and view to this type new = numpy.pad(self, pad_width, **kwargs).view(type(self)) # numpy.pad has stripped all metadata, so copy it over new.__metadata_finalize__(self) new._unit = self.unit # finally move the starting index based on the amount of left-padding new.x0 -= self.dx * pad_width[0] return new
def pad(self, pad_width, **kwargs)
Pad this series to a new size Parameters ---------- pad_width : `int`, pair of `ints` number of samples by which to pad each end of the array. Single int to pad both ends by the same amount, or (before, after) `tuple` to give uneven padding **kwargs see :meth:`numpy.pad` for kwarg documentation Returns ------- series : `Series` the padded version of the input See also -------- numpy.pad for details on the underlying functionality
7.385843
8.818339
0.837555
# check Series compatibility self.is_compatible(other) if (self.xunit == second) and (other.xspan[0] < self.xspan[0]): other = other.crop(start=self.xspan[0]) if (self.xunit == second) and (other.xspan[1] > self.xspan[1]): other = other.crop(end=self.xspan[1]) ox0 = other.x0.to(self.x0.unit) idx = ((ox0 - self.x0) / self.dx).value if not idx.is_integer(): warn('Series have overlapping xspan but their x-axis values are ' 'uniformly offset. Returning a copy of the original Series.') return self.copy() # add the Series along their shared samples slice_ = slice(int(idx), int(idx) + other.size) out = self.copy() out.value[slice_] += other.value return out
def inject(self, other)
Add two compatible `Series` along their shared x-axis values. Parameters ---------- other : `Series` a `Series` whose xindex intersects with `self.xindex` Returns ------- out : `Series` the sum of `self` and `other` along their shared x-axis values Raises ------ ValueError if `self` and `other` have incompatible units or xindex intervals Notes ----- If `other.xindex` and `self.xindex` do not intersect, this method will return a copy of `self`. If the series have uniformly offset indices, this method will emit a warning and return a copy of `self`. If `self.xindex` is an array of timestamps, and if `other.xspan` is not a subset of `self.xspan`, then `other` will be cropped before being added to `self`. Users who wish to taper or window their `Series` should do so before passing it to this method. See :meth:`TimeSeries.taper` and :func:`~gwpy.signal.window.planck` for more information.
4.636046
3.372339
1.374727
if urlparse(url).netloc.startswith('geosegdb.'): # only DB2 server return cls.query_segdb return cls.query_dqsegdb
def _select_query_method(cls, url)
Select the correct query method based on the URL Works for `DataQualityFlag` and `DataQualityDict`
27.597717
28.351383
0.973417
# user passed SegmentList if len(args) == 1 and isinstance(args[0], SegmentList): return args[0] # otherwise unpack two arguments as a segment if len(args) == 1: args = args[0] # if not two arguments, panic try: start, end = args except ValueError as exc: exc.args = ('{0}() takes 2 arguments for start and end GPS time, ' 'or 1 argument containing a Segment or SegmentList'.format( func.__name__),) raise # return list with one Segment return SegmentList([Segment(to_gps(start), to_gps(end))])
def _parse_query_segments(args, func)
Parse *args for query_dqsegdb() or query_segdb() Returns a SegmentList in all cases
5.494829
5.232364
1.050162
query_ = _select_query_method( cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER)) return query_(flag, *args, **kwargs)
def query(cls, flag, *args, **kwargs)
Query for segments of a given flag This method intelligently selects the `~DataQualityFlag.query_segdb` or the `~DataQualityFlag.query_dqsegdb` methods based on the ``url`` kwarg given. Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` See Also -------- DataQualityFlag.query_segdb DataQualityFlag.query_dqsegdb for details on the actual query engine, and documentation of other keyword arguments appropriate for each query Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
10.703208
5.982172
1.789184
warnings.warn("query_segdb is deprecated and will be removed in a " "future release", DeprecationWarning) # parse arguments qsegs = _parse_query_segments(args, cls.query_segdb) # process query try: flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs) except TypeError as exc: if 'DataQualityDict' in str(exc): raise TypeError(str(exc).replace('DataQualityDict', cls.__name__)) else: raise if len(flags) > 1: raise RuntimeError("Multiple flags returned for single query, " "something went wrong:\n %s" % '\n '.join(flags.keys())) elif len(flags) == 0: raise RuntimeError("No flags returned for single query, " "something went wrong.") return flags[flag]
def query_segdb(cls, flag, *args, **kwargs)
Query the initial LIGO segment database for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
3.493557
3.422475
1.020769