code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# if target file doesn't exist, we must generate it
if not os.path.isfile(filename):
    return False
# if we can interact with git, we can regenerate it, so we may as well
try:
    import git
except ImportError:
    return True
else:
    try:
        git.Repo().tags
    except (TypeError, git.GitError):
        return True
    else:
        return False
def reuse_dist_file(filename)
Returns `True` if a distribution file can be reused, otherwise it should be regenerated
5.189032
5.187344
1.000325
# if not in git clone, it doesn't matter
if not in_git_clone():
    return 'GitPython'
# otherwise, call out to get the git version
try:
    gitv = subprocess.check_output('git --version', shell=True)
except (OSError, IOError, subprocess.CalledProcessError):
    # no git installation, most likely
    git_version = '0.0.0'
else:
    if isinstance(gitv, bytes):
        gitv = gitv.decode('utf-8')
    git_version = gitv.strip().split()[2]
# if git>=2.15, we need GitPython>=2.1.8
if LooseVersion(git_version) >= '2.15':
    return 'GitPython>=2.1.8'
return 'GitPython'
def get_gitpython_version()
Determine the required version of GitPython. Because of target systems running very, very old versions of setuptools, we only specify the actual version we need when we need it.
3.157759
3.159362
0.999493
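A small illustrative sketch of the version parsing and comparison used above, assuming `distutils.version.LooseVersion` as in the original code; the version string is made up:

    from distutils.version import LooseVersion

    gitv = "git version 2.17.1\n"
    git_version = gitv.strip().split()[2]       # '2.17.1'
    print(LooseVersion(git_version) >= '2.15')  # True, so GitPython>=2.1.8 would be required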
# don't force requirements if just asking for help
if {'--help', '--help-commands'}.intersection(sys.argv):
    return list()
# otherwise collect all requirements for all known commands
reqlist = []
for cmd, dependencies in SETUP_REQUIRES.items():
    if cmd in sys.argv:
        reqlist.extend(dependencies)
return reqlist
def get_setup_requires()
Return the list of packages required for this setup.py run
5.757967
5.756772
1.000208
scripts = []
for (dirname, _, filenames) in os.walk(scripts_dir):
    scripts.extend([os.path.join(dirname, fn) for fn in filenames])
return scripts
def get_scripts(scripts_dir='bin')
Get relative file paths for all files under the ``scripts_dir``
2.286194
2.286692
0.999782
result = []
for part in years.split(','):
    if '-' in part:
        a, b = part.split('-')
        a, b = int(a), int(b)
        result.extend(range(a, b + 1))
    else:
        a = int(part)
        result.append(a)
return result
def _parse_years(years)
Parse a string of ints, including ranges, into a `list` of `int`. Source: https://stackoverflow.com/a/6405228/1307974
1.898205
1.713871
1.107554
def sub(x):
    return x[1] - x[0]

ranges = []
for k, iterable in groupby(enumerate(sorted(years)), sub):
    rng = list(iterable)
    if len(rng) == 1:
        s = str(rng[0][1])
    else:
        s = "{}-{}".format(rng[0][1], rng[-1][1])
    ranges.append(s)
return ", ".join(ranges)
def _format_years(years)
Format a list of ints into a string, including ranges. Source: https://stackoverflow.com/a/9471386/1307974
2.923335
2.542873
1.149619
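An illustrative round trip for the two year-handling helpers above (a sketch of the expected behaviour, not taken from the source):

    years = _parse_years("2008,2010-2012,2015")
    print(years)                 # [2008, 2010, 2011, 2012, 2015]
    print(_format_years(years))  # "2008, 2010-2012, 2015"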
with open(path, "r") as fobj:
    text = fobj.read().rstrip()
match = COPYRIGHT_REGEX.search(text)
x = match.start("years")
y = match.end("years")
if text[y-1] == " ":  # don't strip trailing whitespace
    y -= 1
yearstr = match.group("years")
years = set(_parse_years(yearstr)) | {year}
with open(path, "w") as fobj:
    print(text[:x] + _format_years(years) + text[y:], file=fobj)
def update_copyright(path, year)
Update a file's copyright statement to include the given year
3.376858
3.500925
0.964562
# parse args and kwargs
if not spectrograms:
    raise ValueError("Must give at least one Spectrogram")
bins = kwargs.pop('bins', None)
low = kwargs.pop('low', None)
high = kwargs.pop('high', None)
nbins = kwargs.pop('nbins', 500)
log = kwargs.pop('log', False)
norm = kwargs.pop('norm', False)
density = kwargs.pop('density', False)
if norm and density:
    raise ValueError("Cannot give both norm=True and density=True, "
                     "please pick one")

# get data and bins
spectrogram = spectrograms[0]
data = numpy.vstack(s.value for s in spectrograms)
if bins is None:
    if low is None and log:
        low = numpy.log10(data.min() / 2)
    elif low is None:
        low = data.min()/2
    elif log:
        low = numpy.log10(low)
    if high is None and log:
        high = numpy.log10(data.max() * 2)
    elif high is None:
        high = data.max() * 2
    elif log:
        high = numpy.log10(high)
    if log:
        bins = numpy.logspace(low, high, num=nbins+1)
    else:
        bins = numpy.linspace(low, high, num=nbins+1)
nbins = bins.size-1
bins = bins * spectrogram.unit

# loop over frequencies
out = numpy.zeros((data.shape[1], nbins))
for i in range(data.shape[1]):
    out[i, :], bins = numpy.histogram(data[:, i], bins, density=density)
    if norm and out[i, :].sum():  # normalise
        out[i, :] /= out[i, :].sum()

# return SpectralVariance
name = '%s variance' % spectrogram.name
new = cls(out, bins, epoch=spectrogram.epoch, name=name,
          channel=spectrogram.channel, f0=spectrogram.f0,
          df=spectrogram.df)
return new
def from_spectrogram(cls, *spectrograms, **kwargs)
Calculate a new `SpectralVariance` from a :class:`~gwpy.spectrogram.Spectrogram` Parameters ---------- spectrogram : `~gwpy.spectrogram.Spectrogram` input `Spectrogram` data bins : `~numpy.ndarray`, optional array of histogram bin edges, including the rightmost edge low : `float`, optional left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional number of bins to generate, only read if ``bins`` is not given, default: `500` log : `bool`, optional calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given, default: `False` norm : `bool`, optional normalise bin counts to a unit sum, default: `False` density : `bool`, optional normalise bin counts to a unit integral, default: `False` Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights
2.404007
2.172431
1.106598
rows, columns = self.shape
out = numpy.zeros(rows)
# Loop over frequencies
for i in range(rows):
    # Calculate cumulative sum for array
    cumsumvals = numpy.cumsum(self.value[i, :])
    # Find value nearest requested percentile
    abs_cumsumvals_minus_percentile = numpy.abs(cumsumvals - percentile)
    minindex = abs_cumsumvals_minus_percentile.argmin()
    val = self.bins[minindex]
    out[i] = val
name = '%s %s%% percentile' % (self.name, percentile)
return FrequencySeries(out, epoch=self.epoch, channel=self.channel,
                       frequencies=self.bins[:-1], name=name)
def percentile(self, percentile)
Calculate a given spectral percentile for this `SpectralVariance` Parameters ---------- percentile : `float` percentile (0 - 100) of the bins to compute Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the given percentile `FrequencySeries` calculated from this `SpectralVariance`
4.532392
3.993667
1.134895
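A minimal numpy-only sketch of the cumulative-sum percentile lookup used in `percentile` above; `counts` and `bin_edges` are made-up illustrative values:

    import numpy

    counts = numpy.array([10., 20., 40., 20., 10.])   # one frequency row, normalised to a 100% sum
    bin_edges = numpy.array([0., 1., 2., 3., 4., 5.]) # amplitude bin edges

    percentile = 50  # per cent
    cumsum = numpy.cumsum(counts)
    nearest = numpy.abs(cumsum - percentile).argmin()
    print(bin_edges[nearest])  # 1.0, the bin edge closest to the 50th percentile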
if self.type is not None:
    return io_nds2.Nds2ChannelType.find(self.type).value
def ndstype(self)
NDS type integer for this channel. This property is mapped to the `Channel.type` string.
17.677505
13.372629
1.321917
if self.type not in [None, 'raw', 'reduced', 'online']:
    return '%s,%s' % (self.name, self.type)
return self.name
def ndsname(self)
Name of this channel as stored in the NDS database
6.269231
6.091815
1.029124
channellist = ChannelList.query(name, use_kerberos=use_kerberos, debug=debug)
if not channellist:
    raise ValueError("No channels found matching '%s'" % name)
if len(channellist) > 1:
    raise ValueError("%d channels found matching '%s', please refine "
                     "search, or use `ChannelList.query` to return "
                     "all results" % (len(channellist), name))
return channellist[0]
def query(cls, name, use_kerberos=None, debug=False)
Query the LIGO Channel Information System for the `Channel` matching the given name Parameters ---------- name : `str` name of channel use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- c : `Channel` a new `Channel` containing all of the attributes set from its entry in the CIS
3.111103
3.245557
0.958573
return ChannelList.query_nds2([name], host=host, port=port, connection=connection, type=type, unique=True)[0]
def query_nds2(cls, name, host=None, port=None, connection=None, type=None)
Query an NDS server for channel information Parameters ---------- name : `str` name of requested channel host : `str`, optional name of NDS2 server. port : `int`, optional port number for NDS2 connection connection : `nds2.connection` open connection to use for query type : `str`, `int` NDS2 channel type with which to restrict query Returns ------- channel : `Channel` channel with metadata retrieved from NDS2 server Raises ------ ValueError if multiple channels are found for a given name Notes ----- .. warning:: A `host` is required if an open `connection` is not given
5.230217
7.931287
0.659441
# extract metadata
name = nds2channel.name
sample_rate = nds2channel.sample_rate
unit = nds2channel.signal_units
if not unit:
    unit = None
ctype = nds2channel.channel_type_to_string(nds2channel.channel_type)
# get dtype
dtype = {  # pylint: disable: no-member
    nds2channel.DATA_TYPE_INT16: numpy.int16,
    nds2channel.DATA_TYPE_INT32: numpy.int32,
    nds2channel.DATA_TYPE_INT64: numpy.int64,
    nds2channel.DATA_TYPE_FLOAT32: numpy.float32,
    nds2channel.DATA_TYPE_FLOAT64: numpy.float64,
    nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64,
}.get(nds2channel.data_type)
return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype,
           type=ctype)
def from_nds2(cls, nds2channel)
Generate a new channel using an existing nds2.channel object
2.221241
2.242755
0.990407
match = cls.MATCH.search(name)
if match is None or (strict and (
        match.start() != 0 or match.end() != len(name))):
    raise ValueError("Cannot parse channel name according to LIGO "
                     "channel-naming convention T990033")
return match.groupdict()
def parse_channel_name(cls, name, strict=True)
Decompose a channel name string into its components Parameters ---------- name : `str` name to parse strict : `bool`, optional require exact matching of format, with no surrounding text, default `True` Returns ------- match : `dict` `dict` of channel name components with the following keys: - `'ifo'`: the letter-number interferometer prefix - `'system'`: the top-level system name - `'subsystem'`: the second-level sub-system name - `'signal'`: the remaining underscore-delimited signal name - `'trend'`: the trend type - `'ndstype'`: the NDS2 channel suffix Any optional keys that aren't found will return a value of `None` Raises ------ ValueError if the name cannot be parsed with at least an IFO and SYSTEM Examples -------- >>> Channel.parse_channel_name('L1:LSC-DARM_IN1_DQ') {'ifo': 'L1', 'ndstype': None, 'signal': 'IN1_DQ', 'subsystem': 'DARM', 'system': 'LSC', 'trend': None} >>> Channel.parse_channel_name( 'H1:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M.rms,m-trend') {'ifo': 'H1', 'ndstype': 'm-trend', 'signal': 'ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M', 'subsystem': 'BS', 'system': 'ISI', 'trend': 'rms'}
6.822338
8.991736
0.758734
return datafind.find_frametype( self, gpstime=gpstime, frametype_match=frametype_match, host=host, port=port, return_all=return_all, allow_tape=allow_tape)
def find_frametype(self, gpstime=None, frametype_match=None, host=None, port=None, return_all=False, allow_tape=True)
Find the containing frametype(s) for this `Channel` Parameters ---------- gpstime : `int` a reference GPS time at which to search for frame files frametype_match : `str` a regular expression string to use to down-select from the list of all available frametypes host : `str` the name of the datafind server to use for frame file discovery port : `int` the port of the datafind server on the given host return_all: `bool`, default: `False` return all matched frame types, otherwise only the first match is returned allow_tape : `bool`, default: `True` include frame files on (slow) magnetic tape in the search Returns ------- frametype : `str`, `list` the first matching frametype containing this channel (`return_all=False`), or a `list` of all matches (`return_all=True`)
2.003092
2.908418
0.688722
new = type(self)(str(self))
new._init_from_channel(self)
return new
def copy(self)
Returns a copy of this channel
8.794706
6.890396
1.276372
new = cls()
for namestr in names:
    for name in cls._split_names(namestr):
        new.append(Channel(name))
return new
def from_names(cls, *names)
Create a new `ChannelList` from a list of names The list of names can include comma-separated sets of names, in which case the return will be a flattened list of all parsed channel names.
5.40339
4.236933
1.275307
out = []
namestr = QUOTE_REGEX.sub('', namestr)
while True:
    namestr = namestr.strip('\' \n')
    if ',' not in namestr:
        break
    for nds2type in io_nds2.Nds2ChannelType.names() + ['']:
        if nds2type and ',%s' % nds2type in namestr:
            try:
                channel, ctype, namestr = namestr.split(',', 2)
            except ValueError:
                channel, ctype = namestr.split(',')
                namestr = ''
            out.append('%s,%s' % (channel, ctype))
            break
        elif nds2type == '' and ',' in namestr:
            channel, namestr = namestr.split(',', 1)
            out.append(channel)
            break
if namestr:
    out.append(namestr)
return out
def _split_names(namestr)
Split a comma-separated list of channel names.
3.755205
3.513886
1.068676
for i, chan in enumerate(self):
    if name == chan.name:
        return i
raise ValueError(name)
def find(self, name)
Find the `Channel` with a specific name in this `ChannelList`. Parameters ---------- name : `str` name of the `Channel` to find Returns ------- index : `int` the position of the first `Channel` in this `ChannelList` whose `~Channel.name` matches the search key. Raises ------ ValueError if no matching `Channel` is found.
5.030659
4.271799
1.177644
# format name regex
if isinstance(name, Pattern):
    flags = name.flags
    name = name.pattern
else:
    flags = 0
if exact_match:
    name = name if name.startswith(r'\A') else r"\A%s" % name
    name = name if name.endswith(r'\Z') else r"%s\Z" % name
name_regexp = re.compile(name, flags=flags)

matched = list(self)

if name is not None:
    matched = [entry for entry in matched if
               name_regexp.search(entry.name) is not None]

if sample_rate is not None:
    sample_rate = (sample_rate.value if
                   isinstance(sample_rate, units.Quantity) else
                   float(sample_rate))
    matched = [entry for entry in matched if entry.sample_rate and
               entry.sample_rate.value == sample_rate]

if sample_range is not None:
    matched = [entry for entry in matched if
               sample_range[0] <= entry.sample_rate.value <= sample_range[1]]

for attr, val in others.items():
    if val is not None:
        matched = [entry for entry in matched if
                   (hasattr(entry, attr) and getattr(entry, attr) == val)]

return self.__class__(matched)
def sieve(self, name=None, sample_rate=None, sample_range=None, exact_match=False, **others)
Find all `Channels <Channel>` in this list matching the specified criteria. Parameters ---------- name : `str`, or regular expression any part of the channel name against which to match (or the full name if `exact_match=True` is given) sample_rate : `float` rate (number of samples per second) to match exactly sample_range : 2-`tuple` `[low, high]` closed interval of rates to match within exact_match : `bool` return channels matching `name` exactly, default: `False` Returns ------- new : `ChannelList` a new `ChannelList` containing the matching channels
2.212012
2.252137
0.982184
from .io import cis
return cis.query(name, use_kerberos=use_kerberos, debug=debug)
def query(cls, name, use_kerberos=None, debug=False)
Query the LIGO Channel Information System for a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found.
5.364487
7.306065
0.734251
ndschannels = io_nds2.find_channels(names, host=host, port=port,
                                    connection=connection, type=type,
                                    unique=unique)
return cls(map(Channel.from_nds2, ndschannels))
def query_nds2(cls, names, host=None, port=None, connection=None, type=io_nds2.Nds2ChannelType.any(), unique=False)
Query an NDS server for channel information Parameters ---------- name : `str` name of requested channel host : `str`, optional name of NDS2 server. port : `int`, optional port number for NDS2 connection connection : `nds2.connection` open connection to use for query type : `str`, `int` NDS2 channel type with which to restrict query unique : `bool`, optional require a unique query result for each name given, default `False` Returns ------- channellist : `ChannelList` list of `Channels <Channel>` with metadata retrieved from NDS2 server Raises ------ ValueError if multiple channels are found for a given name and `unique=True` is given Notes ----- .. warning:: A `host` is required if an open `connection` is not given
2.866861
4.805436
0.596587
start = int(to_gps(start))
end = int(ceil(to_gps(end)))
chans = io_nds2.find_channels(channels, connection=connection,
                              unique=True, epoch=(start, end),
                              type=ctype)
availability = io_nds2.get_availability(chans, start, end,
                                        connection=connection)
return type(availability)(zip(channels, availability.values()))
def query_nds2_availability(cls, channels, start, end, ctype=126, connection=None, host=None, port=None)
Query for when data are available for these channels in NDS2 Parameters ---------- channels : `list` list of `Channel` or `str` for which to search start : `int` GPS start time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` end : `int` GPS end time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` connection : `nds2.connection`, optional open connection to an NDS(2) server, if not given, one will be created based on ``host`` and ``port`` keywords host : `str`, optional name of NDS server host port : `int`, optional port number for NDS connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs
5.085903
5.891138
0.863314
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import ProgrammingError

# connect if needed
if engine is None:
    conn_kw = {}
    for key in ('db', 'host', 'user', 'passwd'):
        try:
            conn_kw[key] = kwargs.pop(key)
        except KeyError:
            pass
    engine = create_engine(get_connection_str(**conn_kw))

try:
    return GravitySpyTable(fetch(engine, tablename, **kwargs))
except ProgrammingError as exc:
    if 'relation "%s" does not exist' % tablename in str(exc):
        msg = exc.args[0]
        msg = msg.replace(
            'does not exist',
            'does not exist, the following tablenames are '
            'acceptable:\n %s\n' % '\n '.join(engine.table_names()))
        exc.args = (msg,)
    raise
def get_gravityspy_triggers(tablename, engine=None, **kwargs)
Fetch data into a `GravitySpyTable` Parameters ---------- tablename : `str` The name of the table you are attempting to receive triggers from. selection other filters you would like to supply to the underlying reader method for the given format .. note:: For now it will attempt to automatically connect you to a specific DB. In the future, this may be an input argument. Returns ------- table : `GravitySpyTable`
3.196382
3.16039
1.011388
if (not user) or (not passwd):
    user = os.getenv('GRAVITYSPY_DATABASE_USER', None)
    passwd = os.getenv('GRAVITYSPY_DATABASE_PASSWD', None)

if (not user) or (not passwd):
    raise ValueError('Remember to either pass '
                     'or export GRAVITYSPY_DATABASE_USER '
                     'and export GRAVITYSPY_DATABASE_PASSWD in order '
                     'to access the Gravity Spy Data: '
                     'https://secrets.ligo.org/secrets/144/'
                     ' description is username and secret is password.')

return 'postgresql://{0}:{1}@{2}:5432/{3}'.format(user, passwd, host, db)
def get_connection_str(db='gravityspy', host='gravityspy.ciera.northwestern.edu', user=None, passwd=None)
Create string to pass to create_engine Parameters ---------- db : `str`, default: ``gravityspy`` The name of the SQL database you are connecting to. host : `str`, default: ``gravityspy.ciera.northwestern.edu`` The name of the server the database you are connecting to lives on. user : `str`, default: `None` Your username for authentication to this database. passwd : `str`, default: `None` Your password for authentication to this database. .. note:: `user` and `passwd` should be given together, otherwise they will be ignored and values will be resolved from the ``GRAVITYSPY_DATABASE_USER`` and ``GRAVITYSPY_DATABASE_PASSWD`` environment variables. Returns ------- conn_string : `str` A SQLAlchemy engine compliant connection string
3.770517
4.128802
0.913223
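An illustrative call showing the connection-string format produced by the helper above; the credentials are made up:

    # hypothetical credentials, for illustration only
    print(get_connection_str(user='alice', passwd='secret'))
    # -> postgresql://alice:secret@gravityspy.ciera.northwestern.edu:5432/gravityspy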
import pytz
dt = dt or datetime.datetime.now()
offset = pytz.timezone(get_timezone(ifo)).utcoffset(dt)
return offset.days * 86400 + offset.seconds + offset.microseconds * 1e-6
def get_timezone_offset(ifo, dt=None)
Return the offset in seconds between UTC and the given interferometer Parameters ---------- ifo : `str` prefix of interferometer, e.g. ``'X1'`` dt : `datetime.datetime`, optional the time at which to calculate the offset, defaults to now Returns ------- offset : `int` the offset in seconds between the timezone of the interferometer and UTC
2.310876
2.870042
0.805172
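A self-contained sketch of the timedelta-to-seconds arithmetic used above, with a fixed timezone standing in for the interferometer lookup:

    import datetime
    import pytz

    offset = pytz.timezone('US/Pacific').utcoffset(datetime.datetime(2020, 1, 1))
    # days/seconds/microseconds combined into a single float of seconds
    print(offset.days * 86400 + offset.seconds + offset.microseconds * 1e-6)  # -28800.0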
# parse keywords
if kwargs is None:
    kwargs = dict()
samp = series.sample_rate
fftlength = kwargs.pop('fftlength', None) or series.duration
overlap = kwargs.pop('overlap', None)
window = kwargs.pop('window', None)

# parse function library and name
if func is None:
    method = library = None
else:
    method = func.__name__
    library = _fft_library(func)

# fftlength -> nfft
nfft = seconds_to_samples(fftlength, samp)

# overlap -> noverlap
noverlap = _normalize_overlap(overlap, window, nfft, samp, method=method)

# create window
window = _normalize_window(window, nfft, library, series.dtype)
if window is not None:  # allow FFT methods to use their own defaults
    kwargs['window'] = window

# create FFT plan for LAL
if library == 'lal' and kwargs.get('plan', None) is None:
    from ._lal import generate_fft_plan
    kwargs['plan'] = generate_fft_plan(nfft, dtype=series.dtype)

kwargs.update({
    'nfft': nfft,
    'noverlap': noverlap,
})
return kwargs
def normalize_fft_params(series, kwargs=None, func=None)
Normalize a set of FFT parameters for processing This method reads the ``fftlength`` and ``overlap`` keyword arguments (presumed to be values in seconds), works out sensible defaults, then updates ``kwargs`` in place to include ``nfft`` and ``noverlap`` as values in sample counts. If a ``window`` is given, the ``noverlap`` parameter will be set to the recommended overlap for that window type, if ``overlap`` is not given. If a ``window`` is given as a `str`, it will be converted to a `numpy.ndarray` containing the correct window (of the correct length). Parameters ---------- series : `gwpy.timeseries.TimeSeries` the data that will be processed using an FFT-based method kwargs : `dict` the dict of keyword arguments passed by the user func : `callable`, optional the FFT method that will be called Examples -------- >>> from numpy.random import normal >>> from gwpy.timeseries import TimeSeries >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256)) {'nfft': 1024, 'noverlap': 0} >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256), ... {'window': 'hann'}) {'window': array([ 0.00000000e+00, 9.41235870e-06, ..., 3.76490804e-05, 9.41235870e-06]), 'noverlap': 0, 'nfft': 1024}
3.956865
4.028536
0.982209
if method == 'bartlett':
    return 0
if overlap is None and isinstance(window, string_types):
    return recommended_overlap(window, nfft)
if overlap is None:
    return 0
return seconds_to_samples(overlap, samp)
def _normalize_overlap(overlap, window, nfft, samp, method='welch')
Normalise an overlap in physical units to a number of samples Parameters ---------- overlap : `float`, `Quantity`, `None` the overlap in some physical unit (seconds) window : `str` the name of the window function that will be used, only used if `overlap=None` is given nfft : `int` the number of samples that will be used in the fast Fourier transform samp : `Quantity` the sampling rate (Hz) of the data that will be transformed method : `str` the name of the averaging method, default: `'welch'`, only used to return `0` for `'bartlett'` averaging Returns ------- noverlap : `int` the number of samples to be be used for the overlap
5.085263
5.281792
0.962791
if library == '_lal' and isinstance(window, numpy.ndarray):
    from ._lal import window_from_array
    return window_from_array(window)
if library == '_lal':
    from ._lal import generate_window
    return generate_window(nfft, window=window, dtype=dtype)
if isinstance(window, string_types):
    window = canonical_name(window)
if isinstance(window, string_types + (tuple,)):
    return get_window(window, nfft)
return None
def _normalize_window(window, nfft, library, dtype)
Normalise a window specification for a PSD calculation Parameters ---------- window : `str`, `numpy.ndarray`, `None` the input window specification nfft : `int` the length of the Fourier transform, in samples library : `str` the name of the library that provides the PSD routine dtype : `type` the required type of the window array, only used if `library='lal'` is given Returns ------- window : `numpy.ndarray`, `lal.REAL8Window` a numpy-, or `LAL`-format window array
3.45319
3.806815
0.907107
@wraps(func)
def wrapped_func(series, method_func, *args, **kwargs):
    if isinstance(series, tuple):
        data = series[0]
    else:
        data = series
    # normalise FFT parameters for all libraries
    normalize_fft_params(data, kwargs=kwargs, func=method_func)
    return func(series, method_func, *args, **kwargs)

return wrapped_func
def set_fft_params(func)
Decorate a method to automatically convert quantities to samples
4.592635
4.694556
0.97829
# decorator has translated the arguments for us, so just call psdn()
return _psdn(timeseries, method_func, *args, **kwargs)
def psd(timeseries, method_func, *args, **kwargs)
Generate a PSD using a method function All arguments are presumed to be given in physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling
12.612483
21.379608
0.589931
# unpack tuple of timeseries for cross spectrum
try:
    timeseries, other = timeseries
# or just calculate PSD
except ValueError:
    return method_func(timeseries, kwargs.pop('nfft'), *args, **kwargs)
else:
    return method_func(timeseries, other, kwargs.pop('nfft'),
                       *args, **kwargs)
def _psdn(timeseries, method_func, *args, **kwargs)
Generate a PSD using a method function with FFT arguments in samples All arguments are presumed to be in sample counts, not physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling
5.296835
5.763454
0.919038
# unpack CSD TimeSeries pair, or single timeseries
try:
    timeseries, other = timeseries
except ValueError:
    timeseries = timeseries
    other = None

from ...spectrogram import Spectrogram

nproc = kwargs.pop('nproc', 1)

# get params
epoch = timeseries.t0.value
nstride = seconds_to_samples(stride, timeseries.sample_rate)
kwargs['fftlength'] = kwargs.pop('fftlength', stride) or stride
normalize_fft_params(timeseries, kwargs=kwargs, func=method_func)
nfft = kwargs['nfft']
noverlap = kwargs['noverlap']

# sanity check parameters
if nstride > timeseries.size:
    raise ValueError("stride cannot be greater than the duration of "
                     "this TimeSeries")
if nfft > nstride:
    raise ValueError("fftlength cannot be greater than stride")
if noverlap >= nfft:
    raise ValueError("overlap must be less than fftlength")

# set up single process Spectrogram method
def _psd(series):
    psd_ = _psdn(series, method_func, *args, **kwargs)
    del psd_.epoch  # fixes Segmentation fault (no idea why it faults)
    return psd_

# define chunks
tschunks = _chunk_timeseries(timeseries, nstride, noverlap)
if other is not None:
    otherchunks = _chunk_timeseries(other, nstride, noverlap)
    tschunks = zip(tschunks, otherchunks)

# calculate PSDs
psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks)

# recombobulate PSDs into a spectrogram
return Spectrogram.from_spectra(*psds, epoch=epoch, dt=stride)
def average_spectrogram(timeseries, method_func, stride, *args, **kwargs)
Generate an average spectrogram using a method function Each time bin of the resulting spectrogram is a PSD generated using the method_func
5.196319
5.235093
0.992593
from ...spectrogram import Spectrogram

# get params
sampling = timeseries.sample_rate.to('Hz').value
nproc = kwargs.pop('nproc', 1)
nfft = kwargs.pop('nfft')
noverlap = kwargs.pop('noverlap')
nstride = nfft - noverlap

# sanity check parameters
if noverlap >= nfft:
    raise ValueError("overlap must be less than fftlength")

# set up single process Spectrogram method
def _psd(series):
    return method_func(series, nfft=nfft, **kwargs)[1]

# define chunks
chunks = []
x = 0
while x + nfft <= timeseries.size:
    y = min(timeseries.size, x + nfft)
    chunks.append((x, y))
    x += nstride
tschunks = (timeseries.value[i:j] for i, j in chunks)

# calculate PSDs with multiprocessing
psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks)

# convert PSDs to array with spacing for averages
numtimes = 1 + int((timeseries.size - nstride) / nstride)
numfreqs = int(nfft / 2 + 1)
data = numpy.zeros((numtimes, numfreqs), dtype=timeseries.dtype)
data[:len(psds)] = psds

# create output spectrogram
unit = fft_utils.scale_timeseries_unit(
    timeseries.unit, scaling=kwargs.get('scaling', 'density'))
out = Spectrogram(numpy.empty((numtimes, numfreqs),
                              dtype=timeseries.dtype),
                  copy=False, dt=nstride * timeseries.dt,
                  t0=timeseries.t0, f0=0, df=sampling/nfft,
                  unit=unit, name=timeseries.name,
                  channel=timeseries.channel)

# normalize over-dense grid
density = nfft // nstride
weights = get_window('triangle', density)
for i in range(numtimes):
    # get indices of overlapping columns
    x = max(0, i+1-density)
    y = min(i+1, numtimes-density+1)
    if x == 0:
        wgt = weights[-y:]
    elif y == numtimes - density + 1:
        wgt = weights[:y-x]
    else:
        wgt = weights
    # calculate weighted average
    out.value[i, :] = numpy.average(data[x:y], axis=0, weights=wgt)

return out
def spectrogram(timeseries, method_func, **kwargs)
Generate a spectrogram using a method function Each time bin of the resulting spectrogram is a PSD estimate using a single FFT
4.183105
4.227568
0.989482
re_name_def = re.compile(r'^\s*%\s+(?P<colname>\w+)')
self.names = []
for line in lines:
    if not line:  # ignore empty lines in header (windows)
        continue
    if not line.startswith('%'):  # end of header lines
        break
    match = re_name_def.search(line)
    if match:
        self.names.append(match.group('colname'))
if not self.names:
    raise core.InconsistentTableError(
        'No column names found in Omega header')

self.cols = []  # pylint: disable=attribute-defined-outside-init
for name in self.names:
    col = core.Column(name=name)
    self.cols.append(col)
def get_cols(self, lines)
Initialize Column objects from a multi-line ASCII header Parameters ---------- lines : `list` List of table lines
3.333544
3.301822
1.009608
if self.args.norm:
    return 'Normalized to {}'.format(self.args.norm)
if len(self.units) == 1 and self.usetex:
    return r'ASD $\left({0}\right)$'.format(
        self.units[0].to_string('latex').strip('$'))
elif len(self.units) == 1:
    return 'ASD ({0})'.format(self.units[0].to_string('generic'))
return super(Spectrogram, self).get_color_label()
def get_color_label(self)
Text for colorbar label
4.609358
4.168011
1.105889
fftlength = float(self.args.secpfft)
overlap = fftlength * self.args.overlap
stride = fftlength - overlap
nfft = self.duration / stride  # number of FFTs
ffps = int(nfft / (self.width * 0.8))  # FFTs per second
if ffps > 3:
    return max(2 * fftlength, ffps * stride + fftlength - 1)
return None
def get_stride(self)
Calculate the stride for the spectrogram This method returns the stride as a `float`, or `None` to indicate selected usage of `TimeSeries.spectrogram2`.
8.457479
8.691771
0.973044
args = self.args
fftlength = float(args.secpfft)
overlap = fftlength * args.overlap
self.log(2, "Calculating spectrogram secpfft: %s, overlap: %s" %
         (fftlength, overlap))

stride = self.get_stride()

if stride:
    specgram = self.timeseries[0].spectrogram(
        stride, fftlength=fftlength, overlap=overlap, window=args.window)
    nfft = stride * (stride // (fftlength - overlap))
    self.log(3, 'Spectrogram calc, stride: %s, fftlength: %s, '
             'overlap: %sf, #fft: %d' % (stride, fftlength,
                                         overlap, nfft))
else:
    specgram = self.timeseries[0].spectrogram2(
        fftlength=fftlength, overlap=overlap, window=args.window)
    nfft = specgram.shape[0]
    self.log(3, 'HR-Spectrogram calc, fftlength: %s, overlap: %s, '
             '#fft: %d' % (fftlength, overlap, nfft))
return specgram ** (1/2.)
def get_spectrogram(self)
Calculate the spectrogram to be plotted This exists as a separate method to allow subclasses to override this and not the entire `get_plot` method, e.g. `Coherencegram`. This method should not apply the normalisation from `args.norm`.
3.833516
3.671339
1.044174
args = self.args

# constant input causes unhelpful (weird) error messages
# translate them to English
inmin = self.timeseries[0].min().value
if inmin == self.timeseries[0].max().value:
    if not self.got_error:
        self.log(0, 'ERROR: Input has constant values [{:g}]. '
                 'Spectrogram-like products cannot process '
                 'them.'.format(inmin))
        self.got_error = True
else:
    # create 'raw' spectrogram
    specgram = self.get_spectrogram()

    # there may be data that can't be processed
    if specgram is not None:    # <-DMM: why is this here?
        # apply normalisation
        if args.norm:
            specgram = specgram.ratio(args.norm)

        self.result = specgram

        # -- update plot defaults
        if not args.ymin:
            args.ymin = 1/args.secpfft if args.yscale == 'log' else 0

        norm = 'log' if args.color_scale == 'log' else None
        # vmin/vmax set in scale_axes_from_data()
        return specgram.plot(figsize=self.figsize, dpi=self.dpi,
                             norm=norm, cmap=args.cmap)
def make_plot(self)
Generate the plot from time series and arguments
9.551649
9.230311
1.034813
if len(self.units) == 1:
    return r'ASD $\left({0}\right)$'.format(
        self.units[0].to_string('latex').strip('$'))
return 'ASD'
def get_ylabel(self)
Text for y-axis label
5.87254
5.668336
1.036025
args = self.args
fftlength = float(args.secpfft)
overlap = args.overlap
self.log(2, "Calculating spectrum secpfft: {0}, overlap: {1}".format(
    fftlength, overlap))
overlap *= fftlength

# create plot
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca()

# handle user specified plot labels
if self.args.legend:
    nlegargs = len(self.args.legend[0])
else:
    nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
    warnings.warn('The number of legends specified must match '
                  'the number of time series'
                  ' (channels * start times). '
                  'There are {:d} series and {:d} legends'.format(
                      len(self.timeseries), len(self.args.legend)))
    nlegargs = 0  # don't use them

for i in range(0, self.n_datasets):
    series = self.timeseries[i]
    if nlegargs:
        label = self.args.legend[0][i]
    else:
        label = series.channel.name
    if len(self.start_list) > 1:
        label += ', {0}'.format(series.epoch.gps)
    asd = series.asd(fftlength=fftlength, overlap=overlap)
    self.spectra.append(asd)
    if self.usetex:
        label = label_to_latex(label)
    ax.plot(asd, label=label)

if args.xscale == 'log' and not args.xmin:
    args.xmin = 1/fftlength

return plot
def make_plot(self)
Generate the plot from time series and arguments
4.933257
4.807483
1.026162
# get tight limits for X-axis
if self.args.xmin is None:
    self.args.xmin = min(fs.xspan[0] for fs in self.spectra)
if self.args.xmax is None:
    self.args.xmax = max(fs.xspan[1] for fs in self.spectra)

# autoscale view for Y-axis
cropped = [fs.crop(self.args.xmin, self.args.xmax) for fs in self.spectra]
ymin = min(fs.value.min() for fs in cropped)
ymax = max(fs.value.max() for fs in cropped)
self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
self.plot.gca().autoscale_view(scalex=False)
def scale_axes_from_data(self)
Restrict data limits for Y-axis based on what you can see
3.13328
2.982123
1.050687
# if reading a cache, read it now and sieve
if io_cache.is_cache(source):
    from .cache import preformat_cache
    source = preformat_cache(source, *args[1:],
                             start=kwargs.get('start'),
                             end=kwargs.get('end'))

# get join arguments
pad = kwargs.pop('pad', None)
gap = kwargs.pop('gap', 'raise' if pad is None else 'pad')
joiner = _join_factory(cls, gap, pad)

# read
return io_read_multi(joiner, cls, source, *args, **kwargs)
def read(cls, source, *args, **kwargs)
Read data from a source into a `gwpy.timeseries` object. This method is just the internal worker for `TimeSeries.read`, and `TimeSeriesDict.read`, and isn't meant to be called directly.
7.040054
7.161863
0.982992
if issubclass(cls, dict):
    def _join(data):
        out = cls()
        data = list(data)
        while data:
            tsd = data.pop(0)
            out.append(tsd, gap=gap, pad=pad)
            del tsd
        return out
else:
    from .. import TimeSeriesBaseList

    def _join(arrays):
        list_ = TimeSeriesBaseList(*arrays)
        return list_.join(pad=pad, gap=gap)

return _join
def _join_factory(cls, gap, pad)
Build a joiner for the given cls, and the given padding options
4.494005
4.503016
0.997999
# find group
if isinstance(source, h5py.File):
    source, ifo = _find_table_group(source, ifo=ifo)

# -- by this point 'source' is guaranteed to be an h5py.Group

# parse default columns
if columns is None:
    columns = list(_get_columns(source))
readcols = set(columns)

# parse selections
selection = parse_column_filters(selection or [])
if selection:
    readcols.update(list(zip(*selection))[0])

# set up meta dict
meta = {'ifo': ifo}
meta.update(source.attrs)
if extended_metadata:
    meta.update(_get_extended_metadata(source))

if loudest:
    loudidx = source['loudest'][:]

# map data to columns
data = []
for name in readcols:
    # convert hdf5 dataset into Column
    try:
        arr = source[name][:]
    except KeyError:
        if name in GET_COLUMN:
            arr = GET_COLUMN[name](source)
        else:
            raise
    if loudest:
        arr = arr[loudidx]
    data.append(Table.Column(arr, name=name))

# read, applying selection filters, and column filters
return filter_table(Table(data, meta=meta), selection)[columns]
def table_from_file(source, ifo=None, columns=None, selection=None, loudest=False, extended_metadata=True)
Read a `Table` from a PyCBC live HDF5 file Parameters ---------- source : `str`, `h5py.File`, `h5py.Group` the file path of open `h5py` object from which to read the data ifo : `str`, optional the interferometer prefix (e.g. ``'G1'``) to read; this is required if reading from a file path or `h5py.File` and the containing file stores data for multiple interferometers columns : `list` or `str`, optional the list of column names to read, defaults to all in group loudest : `bool`, optional read only those events marked as 'loudest', default: `False` (read all) extended_metadata : `bool`, optional record non-column datasets found in the H5 group (e.g. ``'psd'``) in the ``meta`` dict, default: `True` Returns ------- table : `~gwpy.table.EventTable`
4.292508
4.349437
0.986911
exclude = ('background',)
if ifo is None:
    try:
        ifo, = [key for key in h5file if key not in exclude]
    except ValueError as exc:
        exc.args = ("PyCBC live HDF5 file contains dataset groups "
                    "for multiple interferometers, please specify "
                    "the prefix of the relevant interferometer via "
                    "the `ifo` keyword argument, e.g: `ifo=G1`",)
        raise
try:
    return h5file[ifo], ifo
except KeyError as exc:
    exc.args = ("No group for ifo %r in PyCBC live HDF5 file" % ifo,)
    raise
def _find_table_group(h5file, ifo=None)
Find the right `h5py.Group` within the given `h5py.File`
4.938243
4.845187
1.019206
columns = set()
for name in sorted(h5group):
    if (not isinstance(h5group[name], h5py.Dataset)
            or name == 'template_boundaries'):
        continue
    if name.endswith('_template') and name[:-9] in columns:
        continue
    columns.add(name)
return columns - META_COLUMNS
def _get_columns(h5group)
Find valid column names from a PyCBC HDF5 Group Returns a `set` of names.
4.334207
4.621003
0.937936
meta = dict()

# get PSD
try:
    psd = h5group['psd']
except KeyError:
    pass
else:
    from gwpy.frequencyseries import FrequencySeries
    meta['psd'] = FrequencySeries(
        psd[:], f0=0, df=psd.attrs['delta_f'], name='pycbc_live')

# get everything else
for key in META_COLUMNS - {'psd'}:
    try:
        value = h5group[key][:]
    except KeyError:
        pass
    else:
        meta[key] = value

return meta
def _get_extended_metadata(h5group)
Extract the extended metadata for a PyCBC table in HDF5 This method packs non-table-column datasets in the given h5group into a metadata `dict` Returns ------- meta : `dict` the metadata dict
4.121046
4.357939
0.945641
return type(files)([f for f in files if not empty_hdf5_file(f, ifo=ifo)])
def filter_empty_files(files, ifo=None)
Remove empty PyCBC-HDF5 files from a list Parameters ---------- files : `list` A list of file paths to test. ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- nonempty : `list` the subset of the input ``files`` that are considered not empty See also -------- empty_hdf5_file for details of the 'emptiness' test
5.786528
7.676573
0.753791
# the decorator opens the HDF5 file for us, so h5f is guaranteed to
# be an h5py.Group object
h5f = h5f.file

if list(h5f) == []:
    return True
if ifo is not None and (ifo not in h5f or list(h5f[ifo]) == ['psd']):
    return True
return False
def empty_hdf5_file(h5f, ifo=None)
Determine whether PyCBC-HDF5 file is empty A file is considered empty if it contains no groups at the base level, or if the ``ifo`` group contains only the ``psd`` dataset. Parameters ---------- h5f : `str` path of the pycbc_live file to test ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- empty : `bool` `True` if the file looks to have no content, otherwise `False`
5.417045
5.665971
0.956066
if identify_hdf5(origin, filepath, fileobj, *args, **kwargs) and (
        filepath is not None and
        PYCBC_FILENAME.match(basename(filepath))):
    return True
return False
def identify_pycbc_live(origin, filepath, fileobj, *args, **kwargs)
Identify a PyCBC Live file as an HDF5 with the correct name
5.259977
4.641081
1.133352
def get_new_snr(h5group, q=6., n=2.):
    # pylint: disable=invalid-name
    newsnr = h5group['snr'][:].copy()
    rchisq = h5group['chisq'][:]
    idx = numpy.where(rchisq > 1.)[0]
    newsnr[idx] *= _new_snr_scale(rchisq[idx], q=q, n=n)
    return newsnr
Calculate the 'new SNR' column for this PyCBC HDF5 table group
null
null
null
mass1 = h5group['mass1'][:]
mass2 = h5group['mass2'][:]
return (mass1 * mass2) ** (3/5.) / (mass1 + mass2) ** (1/5.)
def get_mchirp(h5group)
Calculate the chirp mass column for this PyCBC HDF5 table group
3.079647
2.983162
1.032343
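A quick numeric check of the chirp-mass formula used above, (m1*m2)**(3/5) / (m1+m2)**(1/5), with plain numpy and illustrative masses:

    import numpy

    mass1 = numpy.array([1.4, 30.0])
    mass2 = numpy.array([1.4, 25.0])
    mchirp = (mass1 * mass2) ** (3/5.) / (mass1 + mass2) ** (1/5.)
    print(mchirp)  # approximately [1.22, 23.8] for these example pairs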
if not isinstance(item, tuple):
    item = (item,)
return item[:ndim] + (None,) * (ndim - len(item))
def format_nd_slice(item, ndim)
Preformat a getitem argument as an N-tuple
2.740594
2.706557
1.012576
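A short illustration of the padding and truncation behaviour of `format_nd_slice` above (expected output shown as comments, a sketch rather than a test from the source):

    print(format_nd_slice(0, 2))          # (0, None)
    print(format_nd_slice((1, 2), 3))     # (1, 2, None)
    print(format_nd_slice((1, 2, 3), 2))  # (1, 2)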
slice_ = as_slice(slice_)

# attribute names
index = '{}index'.format
origin = '{}0'.format
delta = 'd{}'.format

# if array has an index set already, use it
if hasattr(old, '_{}index'.format(oldaxis)):
    setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_])

# otherwise if using a slice, use origin and delta properties
elif isinstance(slice_, slice) or not numpy.sum(slice_):
    if isinstance(slice_, slice):
        offset = slice_.start or 0
        step = slice_.step or 1
    else:  # empty ndarray slice (so just set attributes)
        offset = 0
        step = 1
    dx = getattr(old, delta(oldaxis))
    x0 = getattr(old, origin(oldaxis))

    # set new.x0 / new.y0
    setattr(new, origin(newaxis), x0 + offset * dx)

    # set new.dx / new.dy
    setattr(new, delta(newaxis), dx * step)

# otherwise slice with an index array
else:
    setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_])

return new
def slice_axis_attributes(old, oldaxis, new, newaxis, slice_)
Set axis metadata for ``new`` by slicing an axis of ``old`` This is primarily for internal use in slice functions (__getitem__) Parameters ---------- old : `Array` array being sliced oldaxis : ``'x'`` or ``'y'`` the axis to slice new : `Array` product of slice newaxis : ``'x'`` or ``'y'`` the target axis slice_ : `slice`, `numpy.ndarray` the slice to apply to old (or an index array) See Also -------- Series.__getitem__ Array2D.__getitem__
3.828182
4.261489
0.89832
try:
    slice_ = as_slice(slice_)
except TypeError:
    return False

if isinstance(slice_, numpy.ndarray) and numpy.all(slice_):
    return True

if isinstance(slice_, slice) and slice_ in (
        slice(None, None, None), slice(0, None, 1)):
    return True
def null_slice(slice_)
Returns True if a slice will have no effect
3.131219
2.991912
1.046561
if isinstance(slice_, (Integral, numpy.integer, type(None))):
    return slice(0, None, 1)
if isinstance(slice_, (slice, numpy.ndarray)):
    return slice_
if isinstance(slice_, (list, tuple)):
    return tuple(map(as_slice, slice_))
raise TypeError("Cannot format {!r} as slice".format(slice_))
def as_slice(slice_)
Convert an object to a slice, if possible
3.145234
3.069124
1.024798
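As a quick sanity check of the "null slice" idea used by the two helpers above, both slice forms they treat as no-ops really do leave a numpy array unchanged:

    import numpy

    x = numpy.arange(5)
    print(numpy.array_equal(x, x[slice(0, None, 1)]))       # True
    print(numpy.array_equal(x, x[slice(None, None, None)])) # True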
url = '%s/?q=%s' % (CIS_API_URL, name)
more = True
out = ChannelList()
while more:
    reply = _get(url, use_kerberos=use_kerberos, debug=debug)
    try:
        out.extend(map(parse_json, reply[u'results']))
    except KeyError:
        pass
    except TypeError:  # reply is a list
        out.extend(map(parse_json, reply))
        break
    more = 'next' in reply and reply['next'] is not None
    if more:
        url = reply['next']
    else:
        break
out.sort(key=lambda c: c.name)
return out
def query(name, use_kerberos=None, debug=False)
Query the Channel Information System for details on the given channel name Parameters ---------- name : `~gwpy.detector.Channel`, or `str` Name of the channel of interest Returns ------- channel : `~gwpy.detector.Channel` Channel with all details as acquired from the CIS
3.639685
3.743507
0.972266
from ligo.org import request

# perform query
try:
    response = request(url, debug=debug, use_kerberos=use_kerberos)
except HTTPError:
    raise ValueError("Channel not found at URL %s "
                     "Information System. Please double check the "
                     "name and try again." % url)
if isinstance(response, bytes):
    response = response.decode('utf-8')
return json.loads(response)
def _get(url, use_kerberos=None, debug=False)
Perform a GET query against the CIS
6.245858
6.224207
1.003478
sample_rate = data['datarate']
unit = data['units']
dtype = CIS_DATA_TYPE[data['datatype']]
model = data['source']
url = data['displayurl']
return Channel(data['name'], sample_rate=sample_rate, unit=unit,
               dtype=dtype, model=model, url=url)
def parse_json(data)
Parse the input data dict into a `Channel`. Parameters ---------- data : `dict` input data from CIS json query Returns ------- c : `Channel` a `Channel` built from the data
5.815801
5.078989
1.145071
units = self.units
if len(units) == 1 and str(units[0]) == '':  # dimensionless
    return ''
if len(units) == 1 and self.usetex:
    return units[0].to_string('latex')
elif len(units) == 1:
    return units[0].to_string()
elif len(units) > 1:
    return 'Multiple units'
return super(TimeSeries, self).get_ylabel()
def get_ylabel(self)
Text for y-axis label, check if channel defines it
3.566445
3.369139
1.058563
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca(xscale='auto-gps')

# handle user specified plot labels
if self.args.legend:
    nlegargs = len(self.args.legend[0])
else:
    nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
    warnings.warn('The number of legends specified must match '
                  'the number of time series'
                  ' (channels * start times). '
                  'There are {:d} series and {:d} legends'.format(
                      len(self.timeseries), len(self.args.legend)))
    nlegargs = 0  # don't use them

for i in range(0, self.n_datasets):
    series = self.timeseries[i]
    if nlegargs:
        label = self.args.legend[0][i]
    else:
        label = series.channel.name
    if self.usetex:
        label = label_to_latex(label)
    ax.plot(series, label=label)

return plot
def make_plot(self)
Generate the plot from time series and arguments
4.467722
4.309464
1.036723
# get tight limits for X-axis
if self.args.xmin is None:
    self.args.xmin = min(ts.xspan[0] for ts in self.timeseries)
if self.args.xmax is None:
    self.args.xmax = max(ts.xspan[1] for ts in self.timeseries)

# autoscale view for Y-axis
cropped = [ts.crop(self.args.xmin, self.args.xmax)
           for ts in self.timeseries]
ymin = min(ts.value.min() for ts in cropped)
ymax = max(ts.value.max() for ts in cropped)
self.plot.gca().yaxis.set_data_interval(ymin, ymax, ignore=True)
self.plot.gca().autoscale_view(scalex=False)
def scale_axes_from_data(self)
Restrict data limits for Y-axis based on what you can see
3.099314
2.943221
1.053035
from ...utils.lal import (find_typed_function, to_lal_type_str)

# generate key for caching plan
laltype = to_lal_type_str(dtype)
key = (length, bool(forward), laltype)

# find existing plan
try:
    return LAL_FFTPLANS[key]
# or create one
except KeyError:
    create = find_typed_function(dtype, 'Create', 'FFTPlan')
    if level is None:
        level = LAL_FFTPLAN_LEVEL
    LAL_FFTPLANS[key] = create(length, int(bool(forward)), level)
    return LAL_FFTPLANS[key]
def generate_fft_plan(length, level=None, dtype='float64', forward=True)
Build a `REAL8FFTPlan` for a fast Fourier transform. Parameters ---------- length : `int` number of samples to plan for in each FFT. level : `int`, optional amount of work to do when planning the FFT, default set by `LAL_FFTPLAN_LEVEL` module variable. dtype : :class:`numpy.dtype`, `type`, `str`, optional numeric type of data to plan for forward : bool, optional, default: `True` whether to create a forward or reverse FFT plan Returns ------- plan : `REAL8FFTPlan` or similar FFT plan of the relevant data type
4.383822
4.052071
1.081872
from ...utils.lal import (find_typed_function, to_lal_type_str)

if window is None:
    window = ('kaiser', 24)

# generate key for caching window
laltype = to_lal_type_str(dtype)
key = (length, str(window), laltype)

# find existing window
try:
    return LAL_WINDOWS[key]
# or create one
except KeyError:
    # parse window as name and arguments, e.g. ('kaiser', 24)
    if isinstance(window, (list, tuple)):
        window, beta = window
    else:
        beta = 0
    window = canonical_name(window)
    # create window
    create = find_typed_function(dtype, 'CreateNamed', 'Window')
    LAL_WINDOWS[key] = create(window, beta, length)
    return LAL_WINDOWS[key]
def generate_window(length, window=None, dtype='float64')
Generate a time-domain window for use in a LAL FFT Parameters ---------- length : `int` length of window in samples. window : `str`, `tuple` name of window to generate, default: ``('kaiser', 24)``. Give `str` for simple windows, or tuple of ``(name, *args)`` for complicated windows dtype : :class:`numpy.dtype` numeric type of window, default `numpy.dtype(numpy.float64)` Returns ------- `window` : `REAL8Window` or similar time-domain window to use for FFT
4.955379
4.629022
1.070502
from ...utils.lal import (find_typed_function)

dtype = array.dtype

# create sequence
seq = find_typed_function(dtype, 'Create', 'Sequence')(array.size)
seq.data = array

# create window from sequence
return find_typed_function(dtype, 'Create', 'WindowFromSequence')(seq)
def window_from_array(array)
Convert a `numpy.ndarray` into a LAL `Window` object
7.332073
6.357369
1.153319
import lal
from ...utils.lal import find_typed_function

# default to 50% overlap
if noverlap is None:
    noverlap = int(segmentlength // 2)
stride = segmentlength - noverlap

# get window
if window is None:
    window = generate_window(segmentlength, dtype=timeseries.dtype)

# get FFT plan
if plan is None:
    plan = generate_fft_plan(segmentlength, dtype=timeseries.dtype)

method = method.lower()

# check data length
size = timeseries.size
numsegs = 1 + int((size - segmentlength) / stride)
if method == 'median-mean' and numsegs % 2:
    numsegs -= 1
    if not numsegs:
        raise ValueError("Cannot calculate median-mean spectrum with "
                         "this small a TimeSeries.")
required = int((numsegs - 1) * stride + segmentlength)
if size != required:
    warnings.warn("Data array is the wrong size for the correct number "
                  "of averages given the input parameters. The trailing "
                  "%d samples will not be used in this calculation."
                  % (size - required))
    timeseries = timeseries[:required]

# generate output spectrum
create = find_typed_function(timeseries.dtype, 'Create', 'FrequencySeries')
lalfs = create(timeseries.name, lal.LIGOTimeGPS(timeseries.epoch.gps), 0,
               1 / segmentlength, lal.StrainUnit,
               int(segmentlength // 2 + 1))

# find LAL method (e.g. median-mean -> lal.REAL8AverageSpectrumMedianMean)
methodname = ''.join(map(str.title, re.split('[-_]', method)))
spec_func = find_typed_function(timeseries.dtype, '',
                                'AverageSpectrum{}'.format(methodname))

# calculate spectrum
spec_func(lalfs, timeseries.to_lal(), segmentlength, stride, window, plan)

# format and return
spec = FrequencySeries.from_lal(lalfs)
spec.name = timeseries.name
spec.channel = timeseries.channel
spec.override_unit(scale_timeseries_unit(
    timeseries.unit, scaling='density'))
return spec
def _lal_spectrum(timeseries, segmentlength, noverlap=None, method='welch', window=None, plan=None)
Generate a PSD `FrequencySeries` using |lal|_ Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. method : `str` average PSD method noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `lal.REAL8Window`, optional window to apply to timeseries prior to FFT plan : `lal.REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries`
4.700131
4.37315
1.07477
return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='welch', window=window, plan=plan)
def welch(timeseries, segmentlength, noverlap=None, window=None, plan=None)
Calculate a PSD of this `TimeSeries` using Welch's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch
4.01445
5.897809
0.680668
# pylint: disable=unused-argument
return _lal_spectrum(timeseries, segmentlength, noverlap=0,
                     method='welch', window=window, plan=plan)
def bartlett(timeseries, segmentlength, noverlap=None, window=None, plan=None)
Calculate a PSD of this `TimeSeries` using Bartlett's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch
5.554236
9.257452
0.599975
return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median', window=window, plan=plan)
def median(timeseries, segmentlength, noverlap=None, window=None, plan=None)
Calculate a PSD of this `TimeSeries` using a median average method The median average is similar to Welch's method, using a median average rather than mean. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedian
4.615837
6.524323
0.707481
return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median-mean', window=window, plan=plan)
def median_mean(timeseries, segmentlength, noverlap=None, window=None, plan=None)
Calculate a PSD of this `TimeSeries` using a median-mean average method The median-mean average method divides overlapping segments into "even" and "odd" segments, and computes the bin-by-bin median of the "even" segments and the "odd" segments, and then takes the bin-by-bin average of these two median averages. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedianMean
4.247206
5.773282
0.735666
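The even/odd averaging rule described above can be sketched in plain numpy; this illustrates only the combination step (not the LAL implementation, which also applies bias corrections), and assumes `periodograms` is a 2-D array with one segment periodogram per row:

import numpy

def median_mean_average(periodograms):
    # split the overlapping segments into "even" and "odd" sets by index
    even = periodograms[0::2]
    odd = periodograms[1::2]
    # bin-by-bin median of each set, then the bin-by-bin mean of the two
    return (numpy.median(even, axis=0) + numpy.median(odd, axis=0)) / 2.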
if isinstance(source, FILE_LIKE):
        source = source.name
    if isinstance(source, CacheEntry):
        source = source.path

    # read cache file
    if (isinstance(source, string_types) and
            source.endswith(('.lcf', '.cache'))):
        return lalframe.FrStreamCacheOpen(lal.CacheImport(source))

    # read glue cache object
    if isinstance(source, list) and is_cache(source):
        cache = lal.Cache()
        for entry in file_list(source):
            cache = lal.CacheMerge(cache,
                                   lal.CacheGlob(*os.path.split(entry)))
        return lalframe.FrStreamCacheOpen(cache)

    # read lal cache object
    if isinstance(source, lal.Cache):
        return lalframe.FrStreamCacheOpen(source)

    # read single file
    if isinstance(source, string_types):
        return lalframe.FrStreamOpen(*map(str, os.path.split(source)))

    raise ValueError("Don't know how to open data source of type %r"
                     % type(source))
def open_data_source(source)
Open a GWF file source into a `lalframe.FrStream` object Parameters ---------- source : `str`, `file`, `list` Data source to read. Returns ------- stream : `lalframe.FrStream` An open `FrStream`. Raises ------ ValueError If the input format cannot be identified.
4.343031
3.872535
1.121496
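A usage sketch; the GWF file name is hypothetical but follows the LIGO-T050017 convention, and a ``.lcf``/``.cache`` file or a list of paths would be handled by the other branches above:

stream = open_data_source('H-H1_TEST-1000000000-64.gwf')
print(stream.epoch)   # GPS epoch of the first frame in the stream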
epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds,
                            stream.epoch.gpsNanoSeconds)
    # loop over each file in the stream cache and query its duration
    nfile = stream.cache.length
    duration = 0
    for dummy_i in range(nfile):
        for dummy_j in range(lalframe.FrFileQueryNFrame(stream.file)):
            duration += lalframe.FrFileQueryDt(stream.file, 0)
            lalframe.FrStreamNext(stream)
    # rewind stream and return
    lalframe.FrStreamSeek(stream, epoch)
    return duration
def get_stream_duration(stream)
Find the duration of time stored in a frame stream Parameters ---------- stream : `lalframe.FrStream` stream of data to search Returns ------- duration : `float` the duration (seconds) of the data in this stream
7.303968
7.234735
1.00957
# scaled must be provided to provide a consistent API with frameCPP
    if scaled is not None:
        warnings.warn(
            "the `scaled` keyword argument is not supported by lalframe, "
            "if you require ADC scaling, please install "
            "python-ldas-tools-framecpp",
        )

    stream = open_data_source(source)

    # parse times and restrict to available data
    epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds,
                            stream.epoch.gpsNanoSeconds)
    streamdur = get_stream_duration(stream)
    if start is None:
        start = epoch
    else:
        start = max(epoch, lalutils.to_lal_ligotimegps(start))
    if end is None:
        offset = float(start - epoch)
        duration = streamdur - offset
    else:
        end = min(epoch + streamdur, lalutils.to_lal_ligotimegps(end))
        duration = float(end - start)

    # read data
    out = series_class.DictClass()
    for name in channels:
        out[name] = series_class.from_lal(
            _read_channel(stream, str(name), start=start, duration=duration),
            copy=False)
        lalframe.FrStreamSeek(stream, epoch)
    return out
def read(source, channels, start=None, end=None, series_class=TimeSeries, scaled=None)
Read data from one or more GWF files using the LALFrame API
6.445489
6.311217
1.021275
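A usage sketch for the reader above; the file path and channel names are hypothetical:

data = read('H-H1_TEST-1000000000-64.gwf',
            ['H1:GDS-CALIB_STRAIN', 'H1:GDS-CALIB_STATE_VECTOR'],
            start=1000000000, end=1000000032)
for name, series in data.items():
    print(name, series.span)   # each value is a TimeSeries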
if not start:
        start = list(tsdict.values())[0].xspan[0]
    if not end:
        end = list(tsdict.values())[0].xspan[1]
    duration = end - start

    # get ifos list
    detectors = 0
    for series in tsdict.values():
        try:
            idx = list(lalutils.LAL_DETECTORS.keys()).index(
                series.channel.ifo)
            detectors |= 1 << 2*idx
        except (KeyError, AttributeError):
            continue

    # create new frame
    frame = lalframe.FrameNew(start, duration, name, run, 0, detectors)

    for series in tsdict.values():
        # convert to LAL
        lalseries = series.to_lal()
        # find adder
        add_ = lalutils.find_typed_function(
            series.dtype, 'FrameAdd', 'TimeSeriesProcData', module=lalframe)
        # add time series to frame
        add_(frame, lalseries)

    # write frame
    lalframe.FrameWrite(frame, outfile)
def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0)
Write data to a GWF file using the LALFrame API
5.103479
4.824402
1.057847
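A writing sketch, assuming LAL and lalframe are installed; the channel name and output path are hypothetical:

from gwpy.timeseries import TimeSeries, TimeSeriesDict
import numpy
tsdict = TimeSeriesDict({
    'H1:TEST-CHANNEL': TimeSeries(numpy.zeros(16 * 64), sample_rate=16,
                                  t0=1000000000, channel='H1:TEST-CHANNEL',
                                  name='H1:TEST-CHANNEL'),
})
write(tsdict, 'H-H1_TEST-1000000000-64.gwf')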
from ligo.lw.lsctables import (SegmentTable, SegmentDefTable,
                                   SegmentSumTable)
    from ligo.lw.ligolw import PartialLIGOLWContentHandler

    def _filter(name, attrs):
        return reduce(
            operator.or_,
            [table_.CheckProperties(name, attrs) for table_ in
             (SegmentTable, SegmentDefTable, SegmentSumTable)])

    return build_content_handler(PartialLIGOLWContentHandler, _filter)
def segment_content_handler()
Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
9.586926
10.05608
0.953346
xmldoc = read_ligolw(source, contenthandler=segment_content_handler())

    # parse tables
    with patch_ligotimegps(type(xmldoc.childNodes[0]).__module__):
        out = DataQualityDict.from_ligolw_tables(
            *xmldoc.childNodes,
            names=names,
            **kwargs
        )

    # coalesce
    if coalesce:
        for flag in out:
            out[flag].coalesce()

    return out
def read_ligolw_dict(source, names=None, coalesce=False, **kwargs)
Read segments for the given flag from the LIGO_LW XML file. Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one (or more) open files or file paths, or LIGO_LW `Document` objects names : `list`, `None`, optional list of names to read or `None` to read all into a single `DataQualityFlag`. coalesce : `bool`, optional if `True`, coalesce all parsed `DataQualityFlag` objects before returning, default: `False` **kwargs other keywords are passed to :meth:`DataQualityDict.from_ligolw_tables` Returns ------- flagdict : `DataQualityDict` a new `DataQualityDict` of `DataQualityFlag` entries with ``active`` and ``known`` segments seeded from the XML tables in the given ``source``.
9.817524
7.654399
1.282599
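A usage sketch; the XML file and flag name are hypothetical:

flags = read_ligolw_dict('H1-SEGMENTS-1000000000-86400.xml.gz',
                         names=['H1:DMT-ANALYSIS_READY:1'],
                         coalesce=True)
print(flags['H1:DMT-ANALYSIS_READY:1'].known)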
name = [name] if name is not None else None
    return list(read_ligolw_dict(source, names=name, **kwargs).values())[0]
def read_ligolw_flag(source, name=None, **kwargs)
Read a single `DataQualityFlag` from a LIGO_LW XML file
4.590827
4.705293
0.975673
if isinstance(flags, DataQualityFlag):
        flags = DataQualityDict({flags.name: flags})
    return write_tables(
        target,
        flags.to_ligolw_tables(ilwdchar_compat=ilwdchar_compat,
                               **attrs or dict()),
        **kwargs
    )
def write_ligolw(flags, target, attrs=None, ilwdchar_compat=None, **kwargs)
Write this `DataQualityFlag` to the given LIGO_LW Document Parameters ---------- flags : `DataQualityFlag`, `DataQualityDict` `gwpy.segments` object to write target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into attrs : `dict`, optional extra attributes to write into segment tables **kwargs keyword arguments to use when writing See also -------- gwpy.io.ligolw.write_ligolw_tables for details of acceptable keyword arguments
6.341293
4.225077
1.500871
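A usage sketch with a hypothetical flag and output path:

from gwpy.segments import DataQualityFlag
flag = DataQualityFlag('X1:TEST-FLAG:1', active=[(0, 10)], known=[(0, 16)])
write_ligolw(flag, 'X1-TEST_FLAG-0-16.xml')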
# set units
    if scaling == 'density':
        baseunit = units.Hertz
    elif scaling == 'spectrum':
        baseunit = units.dimensionless_unscaled
    else:
        raise ValueError("Unknown scaling: %r" % scaling)
    if tsunit:
        specunit = tsunit ** 2 / baseunit
    else:
        specunit = baseunit ** -1
    return specunit
def scale_timeseries_unit(tsunit, scaling='density')
Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`.
3.955378
3.975545
0.994927
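For example, following the branches above:

from astropy import units
scale_timeseries_unit(units.dimensionless_unscaled, scaling='density')
# -> 1 / Hz
scale_timeseries_unit(units.m, scaling='spectrum')
# -> m ** 2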
try:
        path = os.path.abspath(cachefile.name)
    except AttributeError:
        path = None
    for line in cachefile:
        try:
            yield _CacheEntry.parse(line, gpstype=gpstype)
        except ValueError:
            # virgo FFL format (seemingly) supports nested FFL files
            parts = line.split()
            if len(parts) == 3 and os.path.abspath(parts[0]) != path:
                with open(parts[0], 'r') as cache2:
                    for entry in _iter_cache(cache2):
                        yield entry
            else:
                raise
def _iter_cache(cachefile, gpstype=LIGOTimeGPS)
Internal method that yields a `_CacheEntry` for each line in the file. This method supports reading LAL- and (nested) FFL-format cache files.
4.345787
3.360662
1.293134
# open file
    if not isinstance(cachefile, FILE_LIKE):
        with open(file_path(cachefile), 'r') as fobj:
            return read_cache(fobj, coltype=coltype, sort=sort,
                              segment=segment)

    # read file
    cache = [x.path for x in _iter_cache(cachefile, gpstype=coltype)]

    # sieve and sort
    if segment:
        cache = sieve(cache, segment=segment)
    if sort:
        cache.sort(key=sort)

    # return simple paths
    return cache
def read_cache(cachefile, coltype=LIGOTimeGPS, sort=None, segment=None)
Read a LAL- or FFL-format cache file as a list of file paths Parameters ---------- cachefile : `str`, `file` Input file or file path to read. coltype : `LIGOTimeGPS`, `int`, optional Type for GPS times. sort : `callable`, optional A callable key function by which to sort the output list of file paths. segment : `gwpy.segments.Segment`, optional A GPS `[start, stop)` interval, if given only files overlapping this interval will be returned. Returns ------- paths : `list` of `str` A list of file paths as read from the cache file.
4.768357
4.574959
1.042273
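A usage sketch with a hypothetical LAL-format cache file:

from gwpy.segments import Segment
paths = read_cache('H1-DATA.lcf', segment=Segment(1000000000, 1000000100))
print(len(paths))   # number of files overlapping the requested interval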
# open file
    if isinstance(fobj, string_types):
        with open(fobj, 'w') as fobj2:
            return write_cache(cache, fobj2, format=format)

    if format is None:
        formatter = str
    elif format.lower() == "lal":
        formatter = _format_entry_lal
    elif format.lower() == "ffl":
        formatter = _format_entry_ffl
    else:
        raise ValueError("Unrecognised cache format {!r}".format(format))

    # write file
    for line in map(formatter, cache):
        try:
            print(line, file=fobj)
        except TypeError:  # bytes-mode
            fobj.write("{}\n".format(line).encode("utf-8"))
def write_cache(cache, fobj, format=None)
Write a `list` of cache entries to a file Parameters ---------- cache : `list` of `str` The list of file paths to write fobj : `file`, `str` The open file object, or file path to write to. format : `str`, optional The format to write to, one of - `None` : format each entry using `str` - ``'lal'`` : write a LAL-format cache - ``'ffl'`` : write an FFL-format cache
3.175042
2.687367
1.181469
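And the reverse, writing a (hypothetical) list of paths as a LAL-format cache:

cache = ['/data/H-H1_TEST-1000000000-64.gwf',
         '/data/H-H1_TEST-1000000064-64.gwf']
write_cache(cache, 'H1-TEST.lcf', format='lal')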
if isinstance(cache, string_types + FILE_LIKE):
        try:
            return bool(len(read_cache(cache)))
        except (TypeError, ValueError, UnicodeDecodeError, ImportError):
            # failed to parse cache
            return False
    if HAS_CACHE and isinstance(cache, Cache):
        return True
    if (isinstance(cache, (list, tuple)) and cache and
            all(map(is_cache_entry, cache))):
        return True
    return False
def is_cache(cache)
Returns `True` if ``cache`` is a readable cache file or object Parameters ---------- cache : `str`, `file`, `list` Object to detect as cache Returns ------- iscache : `bool` `True` if the input object is a cache, or a file in LAL cache format, otherwise `False`
4.645636
4.888706
0.950279
if HAS_CACHEENTRY and isinstance(path, CacheEntry):
        return True
    try:
        file_segment(path)
    except (ValueError, TypeError, AttributeError):
        return False
    return True
def is_cache_entry(path)
Returns `True` if ``path`` can be represented as a cache entry In practice this just tests whether the input is |LIGO-T050017|_ compliant. Parameters ---------- path : `str`, :class:`lal.utils.CacheEntry` The input to test Returns ------- isentry : `bool` `True` if ``path`` is an instance of `CacheEntry`, or can be parsed using |LIGO-T050017|_.
6.222993
7.643209
0.814186
from ..segments import Segment
    name = Path(filename).name
    try:
        obs, desc, start, dur = name.split('-')
    except ValueError as exc:
        exc.args = ('Failed to parse {!r} as LIGO-T050017-compatible '
                    'filename'.format(name),)
        raise
    start = float(start)
    dur = dur.rsplit('.', 1)[0]
    while True:  # recursively remove extension components
        try:
            dur = float(dur)
        except ValueError:
            if '.' not in dur:
                raise
            dur = dur.rsplit('.', 1)[0]
        else:
            break
    return obs, desc, Segment(start, start+dur)
def filename_metadata(filename)
Return metadata parsed from a filename following LIGO-T050017 This method is lenient with regards to integers in the GPS start time of the file, as opposed to `gwdatafind.utils.filename_metadata`, which is strict. Parameters ---------- filename : `str` the path name of a file Returns ------- obs : `str` the observatory metadata tag : `str` the file tag segment : `gwpy.segments.Segment` the GPS ``[float, float)`` interval for this file Notes ----- `LIGO-T050017 <https://dcc.ligo.org/LIGO-T050017>`__ declares a file naming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. Examples -------- >>> from gwpy.io.cache import filename_metadata >>> filename_metadata("A-B-0-1.txt") ('A', 'B', Segment(0, 1)) >>> filename_metadata("A-B-0.456-1.345.txt") ("A", "B", Segment(0.456, 1.801))
5.272496
3.83671
1.374223
from ..segments import Segment
    try:  # CacheEntry
        return Segment(filename.segment)
    except AttributeError:  # file path (str)
        return filename_metadata(filename)[2]
def file_segment(filename)
Return the data segment for a filename following T050017 Parameters ---------- filename : `str`, :class:`~lal.utils.CacheEntry` the path name of a file Returns ------- segment : `~gwpy.segments.Segment` the ``[start, stop)`` GPS segment covered by the given file Notes ----- |LIGO-T050017|_ declares a file naming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details.
20.600485
12.98062
1.587019
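For example, with a hypothetical T050017-style name:

print(file_segment('H-H1_TEST-1000000000-64.gwf'))
# -> [1000000000.0 ... 1000000064.0)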
from ..segments import SegmentList
    out = SegmentList()
    for cache in caches:
        out.extend(file_segment(e) for e in cache)
    return out.coalesce()
def cache_segments(*caches)
Returns the segments of data covered by entries in the cache(s). Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- segments : `~gwpy.segments.SegmentList` A list of segments for when data should be available
7.603915
10.777946
0.705507
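For example, contiguous files coalesce into a single segment (file names hypothetical):

cache = ['A-B-0-16.gwf', 'A-B-16-16.gwf', 'A-B-48-16.gwf']
print(cache_segments(cache))   # two segments: [0, 32) and [48, 64)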
return list(OrderedDict.fromkeys(e for c in caches for e in c))
def flatten(*caches)
Flatten a nested list of cache entries Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- flat : `list` A flat `list` containing the unique set of entries across each input.
6.699638
15.519653
0.431687
flat = flatten(*caches)
    for segment in cache_segments(flat):
        yield sieve(flat, segment=segment)
def find_contiguous(*caches)
Separate one or more cache entry lists into time-contiguous sub-lists Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- caches : `iter` of `list` an iterable yielding each contiguous cache
11.335654
22.0786
0.513423
return type(cache)(e for e in cache if segment.intersects(file_segment(e)))
def sieve(cache, segment=None)
Filter the cache to find those entries that overlap ``segment`` Parameters ---------- cache : `list` Input list of file paths segment : `~gwpy.segments.Segment` The ``[start, stop)`` interval to match against.
15.303133
15.00766
1.019688
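For example, keeping only the files that overlap the interval [20, 40) (file names hypothetical):

from gwpy.segments import Segment
cache = ['A-B-0-16.gwf', 'A-B-16-16.gwf', 'A-B-32-16.gwf']
print(sieve(cache, segment=Segment(20, 40)))
# -> ['A-B-16-16.gwf', 'A-B-32-16.gwf']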
group = parser.add_argument_group('Q-transform options')
    group.add_argument('--plot', nargs='+', type=float, default=[.5],
                       help='One or more times to plot')
    group.add_argument('--frange', nargs=2, type=float,
                       help='Frequency range to plot')
    group.add_argument('--qrange', nargs=2, type=float,
                       help='Search Q range')
    group.add_argument('--nowhiten', action='store_true',
                       help='do not whiten input before transform')
def arg_qxform(cls, parser)
Add an `~argparse.ArgumentGroup` for Q-transform options
4.508719
3.971705
1.13521
gps = args.gps
    search = args.search
    # ensure we have enough data for filter settling
    max_plot = max(args.plot)
    search = max(search, max_plot * 2 + 8)
    args.search = search
    self.log(3, "Search window: {0:.0f} sec, max plot window {1:.0f}".
             format(search, max_plot))
    # make sure we don't create too big interpolations
    xpix = 1200.
    if args.geometry:
        m = re.match('(\\d+)x(\\d+)', args.geometry)
        if m:
            xpix = float(m.group(1))
    # save output x for individual tres calculation
    args.nx = xpix
    self.args.tres = search / xpix / 2
    self.log(3, 'Max time resolution (tres) set to {:.4f}'.format(
        self.args.tres))
    args.start = [[int(gps - search/2)]]
    if args.epoch is None:
        args.epoch = args.gps
    args.duration = search
    args.chan = [[args.chan]]
    if args.color_scale is None:
        args.color_scale = 'linear'
    args.overlap = 0  # so that FFTMixin._finalize_arguments doesn't fail
    xmin = args.xmin
    xmax = args.xmax
    super(Qtransform, self)._finalize_arguments(args)
    # unset defaults from `TimeDomainProduct`
    args.xmin = xmin
    args.xmax = xmax
def _finalize_arguments(self, args)
Derive the standard arguments from the Q-transform-specific ones :type args: Namespace with command line arguments
7.836951
7.820472
1.002107
def fformat(x):  # float format
        if isinstance(x, (list, tuple)):
            return '[{0}]'.format(', '.join(map(fformat, x)))
        if isinstance(x, Quantity):
            x = x.value
        elif isinstance(x, str):
            warnings.warn('WARNING: fformat called with a string. This has '
                          'been deprecated and may disappear '
                          'in a future release.')
            x = float(x)
        return '{0:.2f}'.format(x)

    bits = [('Q', fformat(self.result.q))]
    bits.append(('tres', '{:.3g}'.format(self.qxfrm_args['tres'])))
    if self.qxfrm_args.get('qrange'):
        bits.append(('q-range', fformat(self.qxfrm_args['qrange'])))
    if self.qxfrm_args['whiten']:
        bits.append(('whitened',))
    bits.extend([
        ('f-range', fformat(self.result.yspan)),
        ('e-range', '[{:.3g}, {:.3g}]'.format(self.result.min(),
                                              self.result.max())),
    ])
    return ', '.join([': '.join(bit) for bit in bits])
def get_title(self)
Default title for plot
4.156477
4.034821
1.030152
args = self.args
    asd = self.timeseries[0].asd().value
    if (asd.min() == 0):
        self.log(0, 'Input data has a zero in ASD. '
                    'Q-transform not possible.')
        self.got_error = True
        qtrans = None
    else:
        gps = self.qxfrm_args['gps']
        outseg = Segment(gps, gps).protract(args.plot[self.plot_num])
        # This section tries to optimize the amount of data that is
        # processed and the time resolution needed to create a good
        # image.
        # NB: for each time span specified, the timeseries has enough
        # data for the longest plot
        inseg = outseg.protract(4) & self.timeseries[0].span
        proc_ts = self.timeseries[0].crop(*inseg)
        # time resolution is calculated to provide about 4 times
        # the number of output pixels for interpolation
        tres = float(outseg.end - outseg.start) / 4 / self.args.nx
        self.qxfrm_args['tres'] = tres
        self.qxfrm_args['search'] = int(len(proc_ts) * proc_ts.dt.value)
        self.log(3, 'Q-transform arguments:')
        self.log(3, '{0:>15s} = {1}'.format('outseg', outseg))
        for key in sorted(self.qxfrm_args):
            self.log(3, '{0:>15s} = {1}'.format(key, self.qxfrm_args[key]))
        qtrans = proc_ts.q_transform(outseg=outseg, **self.qxfrm_args)
        if args.ymin is None:  # set before Spectrogram.make_plot
            args.ymin = qtrans.yspan[0]
    return qtrans
def get_spectrogram(self)
Works on a single timeseries and generates a single Q-transform spectrogram
7.415803
7.048828
1.052062
from lal import LIGOTimeGPS
    try:
        return LIGOTimeGPS(s, ns)
    except TypeError:
        return LIGOTimeGPS(int(s), int(ns))
def _ligotimegps(s, ns=0)
Catch TypeError and cast `s` and `ns` to `int`
3.160939
2.38328
1.326297
module = import_module(module)
    orig = module.LIGOTimeGPS
    module.LIGOTimeGPS = _ligotimegps
    try:
        yield
    finally:
        module.LIGOTimeGPS = orig
def patch_ligotimegps(module="ligo.lw.lsctables")
Context manager to patch LIGOTimeGPS on the fly so that it accepts all int types
2.420704
2.314279
1.045986
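A usage sketch, pairing the patch with the segment content handler defined earlier; the file name is hypothetical:

with patch_ligotimegps("ligo.lw.lsctables"):
    xmldoc = read_ligolw('H1-SEGMENTS-1000000000-86400.xml.gz',
                         contenthandler=segment_content_handler())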
from ligo.lw.ligolw import PartialLIGOLWContentHandler
    from ligo.lw.table import Table

    if issubclass(element, Table):
        def _element_filter(name, attrs):
            return element.CheckProperties(name, attrs)
    else:
        def _element_filter(name, _):
            return name == element.tagName

    return build_content_handler(PartialLIGOLWContentHandler,
                                 _element_filter)
def get_partial_contenthandler(element)
Build a `PartialLIGOLWContentHandler` to read only this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element class to be read Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler` to read only the given `element`
6.65189
4.243205
1.567657
from ligo.lw.ligolw import FilteringLIGOLWContentHandler
    from ligo.lw.table import Table

    if issubclass(element, Table):
        def _element_filter(name, attrs):
            return ~element.CheckProperties(name, attrs)
    else:
        def _element_filter(name, _):  # pylint: disable=unused-argument
            return name != element.tagName

    return build_content_handler(FilteringLIGOLWContentHandler,
                                 _element_filter)
def get_filtering_contenthandler(element)
Build a `FilteringLIGOLWContentHandler` to exclude this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element to exclude (and its children) Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.FilteringLIGOLWContentHandler` to exclude an element and its children
7.143211
4.791013
1.49096