column                    dtype
code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
self._check_row_size(array)
self._valign = array
return self
def set_cols_valign(self, array)
Set the desired vertical alignment for each column - the elements of the array should be either "t", "m" or "b": * "t": column aligned on the top of the cell * "m": column aligned on the middle of the cell * "b": column aligned on the bottom of the cell
8.903702
10.909585
0.816136
self._check_row_size(array)
self._dtype = array
return self
def set_cols_dtype(self, array)
Set the desired datatype for each column - the elements of the array should be either a callable or any of "a", "t", "f", "e" or "i": * "a": automatic (try to use the most appropriate datatype) * "t": treat as text * "f": treat as float in decimal format * "e": treat as float in exponential format * "i": treat as int * a callable: should return formatted string for any value given - by default, automatic datatyping is used for each column
12.810209
13.696265
0.935307
if not type(width) is int or width < 0:
    raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
return self
def set_precision(self, width)
Set the desired precision for float/exponential formats - width must be an integer >= 0 - default value is set to 3
4.417966
4.304027
1.026473
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def header(self, array)
Specify the header of the table
10.640293
10.231775
1.039926
self._check_row_size(array)
if not hasattr(self, "_dtype"):
    self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
    cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_row(self, array)
Add a row in the rows stack - cells can contain newlines and tabs
4.576127
4.549932
1.005757
# nb: don't use 'iter' on two-dimensional arrays, to keep the
#     code usable for python 2.1
if header:
    if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
        self.header(rows.next())
    else:
        self.header(rows[0])
        rows = rows[1:]
for row in rows:
    self.add_row(row)
return self
def add_rows(self, rows, header=True)
Add several rows in the rows stack - The 'rows' argument can be either an iterator returning arrays, or a two-dimensional array - 'header' specifies if the first row should be used as the header of the table
5.630295
4.399379
1.279793
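Taken together, the methods above (set_cols_valign, set_cols_dtype, set_precision, header, add_row, add_rows, draw) form a small table-building API. A minimal usage sketch, assuming they belong to a texttable-style Texttable class (the class name is an assumption):

    table = Texttable()
    table.set_cols_dtype(["t", "f", "i"])  # text / float / int columns
    table.set_precision(2)
    table.add_rows([["name", "score", "rank"],  # first row becomes the header
                    ["alice", 9.512, 1],
                    ["bob", 8.1, 2]])
    print(table.draw())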
if not self._header and not self._rows:
    return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
    out += self._hline()
if self._header:
    out += self._draw_line(self._header, isheader=True)
    if self._has_header():
        out += self._hline_header()
length = 0
for row in self._rows:
    length += 1
    out += self._draw_line(row)
    if self._has_hlines() and length < len(self._rows):
        out += self._hline()
if self._has_border():
    out += self._hline()
return out[:-1]
def draw(self)
Draw the table - the table is returned as a whole string
3.32173
3.057335
1.086479
return str(int(round(cls._to_float(x))))
def _fmt_int(cls, x, **kw)
Integer formatting class-method. - x will be float-converted and then used.
8.965778
11.281359
0.794743
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
def _fmt_float(cls, x, **kw)
Float formatting class-method. - x will be float-converted and then used. - precision will be taken from the `n` kw-argument.
5.978054
6.452609
0.926455
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
def _fmt_exp(cls, x, **kw)
Exponential formatting class-method. - x will be float-converted and then used. - precision will be taken from the `n` kw-argument.
6.496162
6.699805
0.969605
f = cls._to_float(x)
if abs(f) > 1e8:
    fn = cls._fmt_exp
elif f - round(f) == 0:
    fn = cls._fmt_int
else:
    fn = cls._fmt_float
return fn(x, **kw)
def _fmt_auto(cls, x, **kw)
auto formatting class-method.
3.217529
3.220004
0.999232
FMT = {
    'a': self._fmt_auto,
    'i': self._fmt_int,
    'f': self._fmt_float,
    'e': self._fmt_exp,
    't': self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
    if callable(dtype):
        return dtype(x)
    else:
        return FMT[dtype](x, n=n)
except FallbackToText:
    return self._fmt_text(x)
def _str(self, i, x)
Handles string formatting of cell data i - index of the cell datatype in self._dtype x - cell data to format
4.016864
3.778497
1.063085
if not self._hline_string:
    self._hline_string = self._build_hline()
return self._hline_string
def _hline(self)
Return a horizontal line
5.006301
4.129464
1.212337
horiz = self._char_horiz
if is_header:
    horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
                horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
    l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                          self._char_corner)
else:
    l += "\n"
return l
def _build_hline(self, is_header=False)
Return a string used to separate rows, or to separate the header from the rows
3.956599
3.966283
0.997559
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
    length = 0
    parts = line.split('\t')
    for part, i in zip(parts, list(range(1, len(parts) + 1))):
        length = length + len(part)
        if i < len(parts):
            length = (length // 8 + 1) * 8
    maxi = max(maxi, length)
return maxi
def _len_cell(self, cell)
Return the width of the cell. Special characters, such as newlines and tabs, are taken into account when computing the width.
3.146478
2.877729
1.093389
if hasattr(self, "_width"):
    return
maxi = []
if self._header:
    maxi = [self._len_cell(x) for x in self._header]
for row in self._rows:
    for cell, i in zip(row, list(range(len(row)))):
        try:
            maxi[i] = max(maxi[i], self._len_cell(cell))
        except (TypeError, IndexError):
            maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3 * (ncols - 1) + [0, 4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
    if self._max_width < (ncols + deco_width):
        raise ValueError('max_width too low to render data')
    available_width = self._max_width - deco_width
    newmaxi = [0] * ncols
    i = 0
    while available_width > 0:
        if newmaxi[i] < maxi[i]:
            newmaxi[i] += 1
            available_width -= 1
        i = (i + 1) % ncols
    maxi = newmaxi
self._width = maxi
def _compute_cols_width(self)
Return an array with the width of each column. If a specific width has been specified, exit. If the total of the column widths exceeds the desired table width, another width will be computed to fit, and cells will be wrapped.
2.815896
2.736536
1.029
line_wrapped = []
for cell, width in zip(line, self._width):
    array = []
    for c in cell.split('\n'):
        if c.strip() == "":
            array.append("")
        else:
            array.extend(textwrapper(c, width))
    line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
    if isheader:
        valign = "t"
    if valign == "m":
        missing = max_cell_lines - len(cell)
        cell[:0] = [""] * int(missing / 2)
        cell.extend([""] * int(missing / 2 + missing % 2))
    elif valign == "b":
        cell[:0] = [""] * (max_cell_lines - len(cell))
    else:
        cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
def _splitit(self, line, isheader)
Split each element of line to fit the column width Each element is turned into a list, result of the wrapping of the string to the desired width
2.73609
2.705108
1.011453
assert len(color) in [3, 4, 6, 8]
if len(color) in [3, 4]:
    color = "".join([c * 2 for c in color])
n = int(color, 16)
t = ((n >> 16) & 255, (n >> 8) & 255, n & 255)
if len(color) == 8:
    t = t + ((n >> 24) & 255,)
return t
def color_hex_to_dec_tuple(color)
Converts a color from hexadecimal to decimal tuple, color can be in the following formats: 3-digit RGB, 4-digit ARGB, 6-digit RGB and 8-digit ARGB.
1.876091
1.837283
1.021122
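A quick usage sketch for color_hex_to_dec_tuple; the expected outputs follow from the bit-shifts above:

    color_hex_to_dec_tuple("ff8800")    # (255, 136, 0)
    color_hex_to_dec_tuple("f80")       # (255, 136, 0) -- 3/4-digit colors double each char first
    color_hex_to_dec_tuple("80ff8800")  # (255, 136, 0, 128) -- alpha appended last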
box = (int(rect[0]), int(rect[1]),
       int(rect[0]) + int(rect[2]), int(rect[1]) + int(rect[3]))
if box[2] > self.img.size[0] or box[3] > self.img.size[1]:
    raise errors.RectangleError("Region out-of-bounds")
self.img = self.img.crop(box)
return self
def region(self, rect)
Selects a sub-region of the image using the supplied rectangle, x, y, width, height.
2.274163
2.21306
1.02761
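A hedged usage sketch for region, assuming img is an instance of this image wrapper (the variable name is illustrative):

    img.region([10, 20, 100, 80])  # keep the 100x80 box whose top-left corner is (10, 20)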
opts = Image._normalize_options(kwargs)
size = self._get_size(width, height)
if opts["mode"] == "adapt":
    self._adapt(size, opts)
elif opts["mode"] == "clip":
    self._clip(size, opts)
elif opts["mode"] == "fill":
    self._fill(size, opts)
elif opts["mode"] == "scale":
    self._scale(size, opts)
else:
    self._crop(size, opts)
return self
def resize(self, width, height, **kwargs)
Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping
2.327694
2.524567
0.922017
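A usage sketch for resize; the instance name is illustrative, and "clip" and "fill" are two of the modes dispatched above:

    img.resize(200, 100, mode="clip")                       # fit within 200x100
    img.resize(200, 100, mode="fill", background="ffffff")  # resize and pad with white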
opts = Image._normalize_options(kwargs)
if deg == "auto":
    if self._orig_format == "JPEG":
        try:
            exif = self.img._getexif() or dict()
            deg = _orientation_to_rotation.get(exif.get(274, 0), 0)
        except Exception:
            logger.warn('unable to parse exif')
            deg = 0
    else:
        deg = 0
deg = 360 - (int(deg) % 360)
if deg % 90 == 0:
    if deg == 90:
        self.img = self.img.transpose(PIL.Image.ROTATE_90)
    elif deg == 180:
        self.img = self.img.transpose(PIL.Image.ROTATE_180)
    elif deg == 270:
        self.img = self.img.transpose(PIL.Image.ROTATE_270)
else:
    self.img = self.img.rotate(deg, expand=bool(int(opts["expand"])))
return self
def rotate(self, deg, **kwargs)
Rotates the image clockwise around its center. Returns the instance. Supports the following optional keyword arguments: expand - Expand the output image to fit rotation
2.411281
2.496971
0.965682
opts = Image._normalize_options(kwargs)
outfile = BytesIO()
if opts["pil"]["format"]:
    fmt = opts["pil"]["format"]
else:
    fmt = self._orig_format
save_kwargs = dict()
if Image._isint(opts["quality"]):
    save_kwargs["quality"] = int(opts["quality"])
if int(opts["optimize"]):
    save_kwargs["optimize"] = True
if int(opts["progressive"]):
    save_kwargs["progressive"] = True
if int(opts["preserve_exif"]):
    save_kwargs["exif"] = self._exif
color = color_hex_to_dec_tuple(opts["background"])
if self.img.mode == "RGBA":
    self._background(fmt, color)
if fmt == "JPEG":
    if self.img.mode == "P":
        # Converting old GIF and PNG files to JPEG can raise
        # IOError: cannot write mode P as JPEG
        # https://mail.python.org/pipermail/python-list/2000-May/036017.html
        self.img = self.img.convert("RGB")
    elif self.img.mode == "RGBA":
        # JPEG does not have an alpha channel so cannot be
        # saved as RGBA. It must be converted to RGB.
        self.img = self.img.convert("RGB")
    if self._orig_format == "JPEG":
        self.img.format = self._orig_format
        save_kwargs["subsampling"] = "keep"
        if opts["quality"] == "keep":
            save_kwargs["quality"] = "keep"
try:
    self.img.save(outfile, fmt, **save_kwargs)
except IOError as e:
    raise errors.ImageSaveError(str(e))
self.img.format = fmt
outfile.seek(0)
return outfile
def save(self, **kwargs)
Returns a buffer to the image for saving, supports the following optional keyword arguments: format - The format to save as: see Image.FORMATS optimize - The image file size should be optimized preserve_exif - Preserve the Exif information in JPEGs progressive - The output should be progressive JPEG quality - The quality used to save JPEGs: integer from 1 - 100
3.378223
3.286263
1.027983
key, qs = (key or "", qs or "")
return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest()
def derive_signature(key, qs)
Derives the signature from the supplied query string using the key.
3.703678
3.128129
1.183992
sig = derive_signature(key, qs)
return "%s&%s" % (qs, urlencode([("sig", sig)]))
def sign(key, qs)
Signs the query string using the key.
5.065592
4.933053
1.026867
unsigned_qs = re.sub(r'&?sig=[^&]*', '', qs)
sig = derive_signature(key, unsigned_qs)
return urlparse.parse_qs(qs).get("sig", [None])[0] == sig
def verify_signature(key, qs)
Verifies that the signature in the query string is correct.
3.686776
3.673236
1.003686
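A round-trip sketch for the three signing helpers above (the key and query string are made up):

    key, qs = "secret", "w=100&h=50"
    signed = sign(key, qs)                # "w=100&h=50&sig=<40-char hex digest>"
    verify_signature(key, signed)         # True
    verify_signature(key, signed + "0")   # False -- signature no longer matches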
if name.lower() == 'planck':  # make sure to handle the Planck window
    return 'planck'
try:  # use equivalence introduced in scipy 0.16.0
    # pylint: disable=protected-access
    return scipy_windows._win_equiv[name.lower()].__name__
except AttributeError:  # old scipy
    try:
        return getattr(scipy_windows, name.lower()).__name__
    except AttributeError:  # no match
        pass  # raise later
except KeyError:  # no match
    pass  # raise later
raise ValueError('no window function in scipy.signal equivalent to %r'
                 % name,)
def canonical_name(name)
Find the canonical name for the given window in scipy.signal Parameters ---------- name : `str` the name of the window you want Returns ------- realname : `str` the name of the window as implemented in `scipy.signal.window` Raises ------- ValueError if ``name`` cannot be resolved to a window function in `scipy.signal` Examples -------- >>> from gwpy.signal.window import canonical_name >>> canonical_name('hanning') 'hann' >>> canonical_name('ksr') 'kaiser'
6.273047
6.422605
0.976714
try:
    name = canonical_name(name)
except KeyError as exc:
    raise ValueError(str(exc))
try:
    rov = ROV[name]
except KeyError:
    raise ValueError("no recommended overlap for %r window" % name)
if nfft:
    return int(ceil(nfft * rov))
return rov
def recommended_overlap(name, nfft=None)
Returns the recommended fractional overlap for the given window If ``nfft`` is given, the return is in samples Parameters ---------- name : `str` the name of the window you are using nfft : `int`, optional the length of the window Returns ------- rov : `float`, `int` the recommended overlap (ROV) for the given window, in samples if ``nfft`` is given (`int`), otherwise fractional (`float`) Examples -------- >>> from gwpy.signal.window import recommended_overlap >>> recommended_overlap('hann') 0.5 >>> recommended_overlap('blackmanharris', nfft=128) 85
4.463305
4.372502
1.020767
# construct a Planck taper window
w = numpy.ones(N)
if nleft:
    w[0] *= 0
    zleft = numpy.array([nleft * (1./k + 1./(k-nleft))
                         for k in range(1, nleft)])
    w[1:nleft] *= expit(-zleft)
if nright:
    w[N-1] *= 0
    zright = numpy.array([-nright * (1./(k-nright) + 1./k)
                          for k in range(1, nright)])
    w[N-nright:N-1] *= expit(-zright)
return w
def planck(N, nleft=0, nright=0)
Return a Planck taper window. Parameters ---------- N : `int` Number of samples in the output window nleft : `int`, optional Number of samples to taper on the left, should be less than `N/2` nright : `int`, optional Number of samples to taper on the right, should be less than `N/2` Returns ------- w : `ndarray` The window, with the maximum value normalized to 1 and at least one end tapered smoothly to 0. Examples -------- To taper 0.1 seconds on both ends of one second of data sampled at 2048 Hz: >>> from gwpy.signal.window import planck >>> w = planck(2048, nleft=205, nright=205) References ---------- .. [1] McKechan, D.J.A., Robinson, C., and Sathyaprakash, B.S. (April 2010). "A tapering window for time-domain templates and simulated signals in the detection of gravitational waves from coalescing compact binaries". Classical and Quantum Gravity 27 (8). :doi:`10.1088/0264-9381/27/8/084020` .. [2] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function#Planck-taper_window
3.044208
3.189614
0.954413
try:
    return os.environ[key].lower() in TRUE
except KeyError:
    return default
def bool_env(key, default=False)
Parse an environment variable as a boolean switch `True` is returned if the variable value matches one of the following: - ``'1'`` - ``'y'`` - ``'yes'`` - ``'true'`` The match is case-insensitive (so ``'Yes'`` will match as `True`) Parameters ---------- key : `str` the name of the environment variable to find default : `bool` the default return value if the key is not found Returns ------- True if the environment variable matches as 'yes' or similar False otherwise Examples -------- >>> import os >>> from gwpy.utils.env import bool_env >>> os.environ['GWPY_VALUE'] = 'yes' >>> print(bool_env('GWPY_VALUE')) True >>> os.environ['GWPY_VALUE'] = 'something else' >>> print(bool_env('GWPY_VALUE')) False >>> print(bool_env('GWPY_VALUE2')) False
3.879018
11.657843
0.332739
if isinstance(cmd, (list, tuple)):
    cmdstr = ' '.join(cmd)
    kwargs.setdefault('shell', False)
else:
    cmdstr = str(cmd)
    kwargs.setdefault('shell', True)
proc = Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
out, err = proc.communicate()
if proc.returncode:
    if on_error == 'ignore':
        pass
    elif on_error == 'warn':
        e = CalledProcessError(proc.returncode, cmdstr)
        warnings.warn(str(e))
    else:
        raise CalledProcessError(proc.returncode, cmdstr)
return out.decode('utf-8'), err.decode('utf-8')
def call(cmd, stdout=PIPE, stderr=PIPE, on_error='raise', **kwargs)
Call out to the shell using `subprocess.Popen` Parameters ---------- stdout : `file-like`, optional stream for stdout stderr : `file-like`, optional stderr for stderr on_error : `str`, optional what to do when the command fails, one of - 'ignore' - do nothing - 'warn' - print a warning - 'raise' - raise an exception **kwargs other keyword arguments to pass to `subprocess.Popen` Returns ------- out : `str` the output stream of the command err : `str` the error stream from the command Raises ------ OSError if `cmd` is a `str` (or `shell=True` is passed) and the executable is not found subprocess.CalledProcessError if the command fails otherwise
1.740955
1.913849
0.909662
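A usage sketch for call, showing how the type of cmd selects the shell mode:

    out, err = call(['echo', 'hello'])               # list -> shell=False
    out, err = call('echo $HOME', on_error='warn')   # str -> shell=True, warn on failure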
def wrapper(*args, **kwargs):
    # parse columns argument
    columns = kwargs.pop("columns", None)
    # read table
    tab = func(*args, **kwargs)
    # filter on columns
    if columns is None:
        return tab
    return tab[columns]

return _safe_wraps(wrapper, func)
def read_with_columns(func)
Decorate a Table read method to use the ``columns`` keyword
3.57593
3.898554
0.917245
def wrapper(*args, **kwargs):
    # parse selection
    selection = kwargs.pop('selection', None) or []
    # read table
    tab = func(*args, **kwargs)
    # apply selection
    if selection:
        return filter_table(tab, selection)
    return tab

return _safe_wraps(wrapper, func)
def read_with_selection(func)
Decorate a Table read method to apply ``selection`` keyword
4.013562
4.019795
0.998449
reader = registry.get_reader(name, data_class)
wrapped = (  # noqa
    read_with_columns(        # use ``columns``
        read_with_selection(  # use ``selection``
            reader
        ))
)
return registry.register_reader(name, data_class, wrapped, force=True)
def decorate_registered_reader(name, data_class=EventTable, columns=True, selection=True)
Wrap an existing registered reader to use GWpy's input decorators Parameters ---------- name : `str` the name of the registered format data_class : `type`, optional the class for whom the format is registered columns : `bool`, optional use the `read_with_columns` decorator selection : `bool`, optional use the `read_with_selection` decorator
5.930943
6.165484
0.961959
import root_numpy

# parse column filters into tree2array ``selection`` keyword
# NOTE: not all filters can be passed directly to root_numpy, so we store
#       those separately and apply them after-the-fact before returning
try:
    selection = kwargs.pop('selection')
except KeyError:  # no filters
    filters = None
else:
    rootfilters = []
    filters = []
    for col, op_, value in parse_column_filters(selection):
        try:
            opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
        except (IndexError, KeyError):  # cannot filter with root_numpy
            filters.append((col, op_, value))
        else:  # can filter with root_numpy
            rootfilters.append('{0} {1} {2!r}'.format(col, opstr, value))
    kwargs['selection'] = ' && '.join(rootfilters)

# pass file name (not path)
if not isinstance(source, string_types):
    source = source.name

# find single tree (if only one tree present)
if treename is None:
    trees = root_numpy.list_trees(source)
    if len(trees) == 1:
        treename = trees[0]
    elif not trees:
        raise ValueError("No trees found in %s" % source)
    else:
        raise ValueError("Multiple trees found in %s, please select one "
                         "via the `treename` keyword argument, e.g. "
                         "`treename='events'`. Available trees are: %s."
                         % (source, ', '.join(map(repr, trees))))

# read, filter, and return
t = Table(root_numpy.root2array(source, treename, branches=columns,
                                **kwargs))
if filters:
    return filter_table(t, *filters)
return t
def table_from_root(source, treename=None, columns=None, **kwargs)
Read a Table from a ROOT tree
3.754179
3.736725
1.004671
import root_numpy
root_numpy.array2root(table.as_array(), filename, **kwargs)
def table_to_root(table, filename, **kwargs)
Write a Table to a ROOT file
4.748611
4.509607
1.052999
s = "{}".format(f)
if "e" in s or "E" in s:
    return "{0:.{1}f}".format(f, n)
i, p, d = s.partition(".")
return ".".join([i, (d + "0" * n)[:n]])
def _truncate(f, n)
Truncates/pads a float `f` to `n` decimal places without rounding From https://stackoverflow.com/a/783927/1307974 (CC-BY-SA)
1.871304
1.674474
1.117547
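Worked examples for _truncate, following the string logic above:

    _truncate(3.14159, 2)  # '3.14' -- truncated, not rounded
    _truncate(2.5, 3)      # '2.500' -- padded with zeros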
class FixedGPSScale(GPSScale):
    name = str('{0}s'.format(unit.long_names[0] if unit.long_names
                             else unit.names[0]))

    def __init__(self, axis, epoch=None):
        super(FixedGPSScale, self).__init__(axis, epoch=epoch, unit=unit)

return FixedGPSScale
def _gps_scale_factory(unit)
Construct a GPSScale for this unit
4.974586
4.684566
1.06191
if epoch is None:
    self._epoch = None
    return
if isinstance(epoch, (Number, Decimal)):
    self._epoch = float(epoch)
else:
    self._epoch = float(to_gps(epoch))
def set_epoch(self, epoch)
Set the GPS epoch
4.251186
3.58775
1.184917
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit)
                    and unit.physical_type == 'time'):
    self._unit = unit
    return
# convert float to custom unit in seconds
if isinstance(unit, Number):
    unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
    unit = units.Unit(unit)
except ValueError as exc:
    # catch annoying plurals
    try:
        unit = units.Unit(str(unit).rstrip('s'))
    except ValueError:
        raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
    raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
    if other.decompose().scale == dec.scale:
        self._unit = other
        return
raise ValueError("Unrecognised unit: %s" % unit)
def set_unit(self, unit)
Set the GPS step scale
4.765186
4.630484
1.02909
if not self.unit:
    return None
name = sorted(self.unit.names, key=len)[-1]
return '%ss' % name
def get_unit_name(self)
Returns the name of the unit for this GPS scale Note that this returns a simply-pluralised version of the name.
6.692297
6.480286
1.032716
scale = self.scale or 1
epoch = self.epoch or 0
values = numpy.asarray(values)

# handle simple or data transformations with floats
if self._parents or (  # part of composite transform (from draw())
        epoch == 0 and  # no large additions
        scale == 1      # no multiplications
):
    return self._transform(values, float(epoch), float(scale))

# otherwise do things carefully (and slowly) with Decimals
# -- ideally this only gets called for transforming tick positions
flat = values.flatten()

def _trans(x):
    return self._transform_decimal(x, epoch, scale)

return numpy.asarray(list(map(_trans, flat))).reshape(values.shape)
def transform_non_affine(self, values)
Transform an array of GPS times. This method is designed to filter out transformations that will generate text elements that require exact precision, and use `Decimal` objects to do the transformation, and simple `float` otherwise.
10.701996
9.694779
1.103893
vdec = Decimal(_truncate(value, 12))
edec = Decimal(_truncate(epoch, 12))
sdec = Decimal(_truncate(scale, 12))
return type(value)(cls._transform(vdec, edec, sdec))
def _transform_decimal(cls, value, epoch, scale)
Transform to/from GPS using `decimal.Decimal` for precision
3.576073
3.65302
0.978936
@wraps(func)
def wrapped_func(*args, **kwargs):
    warnings.warn(
        warning.format(func),
        category=DeprecationWarning,
        stacklevel=2,
    )
    return func(*args, **kwargs)

return wrapped_func
def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING)
Adds a `DeprecationWarning` to a function Parameters ---------- func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example.
1.949576
2.454638
0.794242
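A usage sketch for deprecated_function as a decorator:

    @deprecated_function
    def old_sum(a, b):
        return a + b

    old_sum(1, 2)  # emits a DeprecationWarning, then returns 3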
def decorator(func):
    # @wraps(func) <- we can't use this as normal because it doesn't work
    #                 on python < 3 for instance methods,
    #                 see workaround below
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        try:
            return returntype(result)
        except (TypeError, ValueError) as exc:
            exc.args = (
                'failed to cast return from {0} as {1}: {2}'.format(
                    func.__name__, returntype.__name__, str(exc)),
            )
            raise
    try:
        return wraps(func)(wrapped)
    except AttributeError:  # python < 3.0.0
        wrapped.__doc__ = func.__doc__
        return wrapped

return decorator
def return_as(returntype)
Decorator to cast return of function as the given type Parameters ---------- returntype : `type` the desired return type of the decorated function
4.586749
4.634669
0.989661
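A usage sketch for return_as:

    @return_as(tuple)
    def bounds():
        return [0, 10]

    bounds()  # (0, 10)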
# query for metadata
url = ('{url}/api/records/?'
       'page=1&'
       'size={hits}&'
       'q=conceptrecid:"{id}"&'
       'sort=-version&'
       'all_versions=True'.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata['hits']['hits']):
    version = hit['metadata']['version'][len(tag_prefix):]
    lines.append('-' * len(version))
    lines.append(version)
    lines.append('-' * len(version))
    lines.append('')
    lines.append('.. image:: {badge}\n'
                 '   :target: {doi}'.format(**hit['links']))
    if i < hits - 1:
        lines.append('')
return '\n'.join(lines)
def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v')
Query and format a citations page from Zenodo entries Parameters ---------- zid : `int`, `str` the Zenodo ID of the target record url : `str`, optional the base URL of the Zenodo host, defaults to ``https://zenodo.org`` hits : `int`, optional the maximum number of hits to show, default: ``10`` tag_prefix : `str`, optional the prefix for git tags. This is removed to generate the section headers in the output RST Returns ------- rst : `str` an RST-formatted string of DOI badges with URLs
3.24423
3.598234
0.901617
# pylint: disable=redefined-builtin

# parse input source
source = file_list(source)

# parse type
ctype = channel_dict_kwarg(type, channels, (str,))

# read each individually and append
out = series_class.DictClass()
for i, file_ in enumerate(source):
    if i == 1:
        # force data into fresh memory so that append works
        for name in out:
            out[name] = numpy.require(out[name], requirements=['O'])
    # read frame
    out.append(read_gwf(file_, channels, start=start, end=end,
                        ctype=ctype, scaled=scaled,
                        series_class=series_class),
               copy=False)
return out
def read(source, channels, start=None, end=None, scaled=None, type=None, series_class=TimeSeries)
Read a dict of series from one or more GWF files Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of cache file, - `list` of paths. channels : `~gwpy.detector.ChannelList`, `list` a list of channels to read from the source. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional GPS start time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. scaled : `bool`, optional apply slope and bias calibration to ADC data. type : `dict`, optional a `dict` of ``(name, channel-type)`` pairs, where ``channel-type`` can be one of ``'adc'``, ``'proc'``, or ``'sim'``. series_class : `type`, optional the `Series` sub-type to return. Returns ------- data : `~gwpy.timeseries.TimeSeriesDict` or similar a dict of ``(channel, series)`` pairs read from the GWF source(s).
6.725556
6.939919
0.969112
# parse kwargs
if not start:
    start = 0
if not end:
    end = 0
span = Segment(start, end)

# open file
stream = io_gwf.open_gwf(filename, 'r')
nframes = stream.GetNumberOfFrames()

# find channels
out = series_class.DictClass()

# loop over frames in GWF
i = 0
while True:
    this = i
    i += 1

    # read frame
    try:
        frame = stream.ReadFrameNSubset(this, 0)
    except IndexError:
        if this >= nframes:
            break
        raise

    # check whether we need this frame at all
    if not _need_frame(frame, start, end):
        continue

    # get epoch for this frame
    epoch = LIGOTimeGPS(*frame.GetGTime())

    # and read all the channels
    for channel in channels:
        _scaled = _dynamic_scaled(scaled, channel)
        try:
            new = _read_channel(stream, this, str(channel),
                                ctype.get(channel, None), epoch,
                                start, end, scaled=_scaled,
                                series_class=series_class)
        except _Skip:  # don't need this frame for this channel
            continue
        try:
            out[channel].append(new)
        except KeyError:
            out[channel] = numpy.require(new, requirements=['O'])

    # if we have all of the data we want, stop now
    if all(span in out[channel].span for channel in out):
        break

# if any channels weren't read, something went wrong
for channel in channels:
    if channel not in out:
        msg = "Failed to read {0!r} from {1!r}".format(
            str(channel), filename)
        if start or end:
            msg += ' for {0}'.format(span)
        raise ValueError(msg)

return out
def read_gwf(filename, channels, start=None, end=None, scaled=None, ctype=None, series_class=TimeSeries)
Read a dict of series data from a single GWF file Parameters ---------- filename : `str` the GWF path from which to read channels : `~gwpy.detector.ChannelList`, `list` a list of channels to read from the source. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional GPS start time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. scaled : `bool`, optional apply slope and bias calibration to ADC data. type : `dict`, optional a `dict` of ``(name, channel-type)`` pairs, where ``channel-type`` can be one of ``'adc'``, ``'proc'``, or ``'sim'``. series_class : `type`, optional the `Series` sub-type to return. Returns ------- data : `~gwpy.timeseries.TimeSeriesDict` or similar a dict of ``(channel, series)`` pairs read from the GWF file.
4.955553
4.848082
1.022168
data = _get_frdata(stream, num, name, ctype=ctype)
return read_frdata(data, epoch, start, end, scaled=scaled,
                   series_class=series_class)
def _read_channel(stream, num, name, ctype, epoch, start, end, scaled=True, series_class=TimeSeries)
Read a channel from a specific frame in a stream
4.150007
3.917412
1.059375
ctypes = (ctype,) if ctype else ('adc', 'proc', 'sim')
for ctype in ctypes:
    _reader = getattr(stream, 'ReadFr{0}Data'.format(ctype.title()))
    try:
        return _reader(num, name)
    except IndexError as exc:
        if FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):
            continue
        raise
raise ValueError("no Fr{{Adc,Proc,Sim}}Data structures with the "
                 "name {0}".format(name))
def _get_frdata(stream, num, name, ctype=None)
Brute force-ish method to return the FrData structure for a channel This saves on pulling the channel type from the TOC
7.701176
7.773127
0.990744
datastart = epoch + frdata.GetTimeOffset()
try:
    trange = frdata.GetTRange()
except AttributeError:  # not proc channel
    trange = 0.

# check overlap with user-requested span
if (end and datastart >= end) or (trange and datastart + trange < start):
    raise _Skip()

# get scaling
try:
    slope = frdata.GetSlope()
    bias = frdata.GetBias()
except AttributeError:  # not FrAdcData
    slope = None
    bias = None
    null_scaling = True
else:
    null_scaling = slope == 1. and bias == 0.

out = None
for j in range(frdata.data.size()):
    # we use range(frdata.data.size()) to avoid segfault
    # related to iterating directly over frdata.data
    try:
        new = read_frvect(frdata.data[j], datastart, start, end,
                          name=frdata.GetName(),
                          series_class=series_class)
    except _Skip:
        continue
    # apply ADC scaling (only if interesting; this prevents
    # unnecessary type-casting errors)
    if scaled and not null_scaling:
        new *= slope
        new += bias
    elif slope is not None:
        # user has deliberately disabled the ADC calibration, so
        # the stored engineering unit is not valid, revert to 'counts':
        new.override_unit('count')
    if out is None:
        out = new
    else:
        out.append(new)
return out
def read_frdata(frdata, epoch, start, end, scaled=True, series_class=TimeSeries)
Read a series from an `FrData` structure Parameters ---------- frdata : `LDAStools.frameCPP.FrAdcData` or similar the data structure to read epoch : `float` the GPS start time of the containing frame (`LDAStools.frameCPP.FrameH.GTime`) start : `float` the GPS start time of the user request end : `float` the GPS end time of the user request scaled : `bool`, optional apply slope and bias calibration to ADC data. series_class : `type`, optional the `Series` sub-type to return. Returns ------- series : `~gwpy.timeseries.TimeSeriesBase` the formatted data series Raises ------ _Skip if this data structure doesn't overlap with the requested ``[start, end)`` interval.
6.815979
6.149443
1.10839
# only read FrVect with matching name (or no name set)
#    frame spec allows for arbitrary other FrVects
#    to hold other information
if vect.GetName() and name and vect.GetName() != name:
    raise _Skip()

# get array
arr = vect.GetDataArray()
nsamp = arr.size

# and dimensions
dim = vect.GetDim(0)
dx = dim.dx
x0 = dim.startX

# start and end GPS times of this FrVect
dimstart = epoch + x0
dimend = dimstart + nsamp * dx

# index of first required sample
nxstart = int(max(0., float(start-dimstart)) / dx)

# requested start time is after this frame, skip
if nxstart >= nsamp:
    raise _Skip()

# index of end sample
if end:
    nxend = int(nsamp - ceil(max(0., float(dimend-end)) / dx))
else:
    nxend = None

if nxstart or nxend:
    arr = arr[nxstart:nxend]

# -- cast as a series

# get unit
unit = vect.GetUnitY() or None

# create array
series = series_class(arr, t0=dimstart+nxstart*dx, dt=dx, name=name,
                      channel=name, unit=unit, copy=False)

# add information to channel
series.channel.sample_rate = series.sample_rate.value
series.channel.unit = unit
series.channel.dtype = series.dtype

return series
def read_frvect(vect, epoch, start, end, name=None, series_class=TimeSeries)
Read an array from an `FrVect` structure Parameters ---------- vect : `LDASTools.frameCPP.FrVect` the frame vector structure to read start : `float` the GPS start time of the request end : `float` the GPS end time of the request epoch : `float` the GPS start time of the containing `FrData` structure name : `str`, optional the name of the output `series_class`; this is also used to ignore ``FrVect`` structures containing other information series_class : `type`, optional the `Series` sub-type to return. Returns ------- series : `~gwpy.timeseries.TimeSeriesBase` the formatted data series Raises ------ _Skip if this vect doesn't overlap with the requested ``[start, end)`` interval, or the name doesn't match.
5.977864
5.35394
1.116536
# set frame header metadata
if not start:
    starts = {LIGOTimeGPS(tsdict[key].x0.value) for key in tsdict}
    if len(starts) != 1:
        raise RuntimeError("Cannot write multiple TimeSeries to a single "
                           "frame with different start times, "
                           "please write into different frames")
    start = list(starts)[0]
if not end:
    ends = {tsdict[key].span[1] for key in tsdict}
    if len(ends) != 1:
        raise RuntimeError("Cannot write multiple TimeSeries to a single "
                           "frame with different end times, "
                           "please write into different frames")
    end = list(ends)[0]
duration = end - start
start = LIGOTimeGPS(start)
ifos = {ts.channel.ifo for ts in tsdict.values() if
        ts.channel and ts.channel.ifo and
        hasattr(frameCPP, 'DETECTOR_LOCATION_{0}'.format(ts.channel.ifo))}

# create frame
frame = io_gwf.create_frame(time=start, duration=duration, name=name,
                            run=run, ifos=ifos)

# append channels
for i, key in enumerate(tsdict):
    try:  # pylint: disable=protected-access
        ctype = tsdict[key].channel._ctype or 'proc'
    except AttributeError:
        ctype = 'proc'
    append_to_frame(frame, tsdict[key].crop(start, end),
                    type=ctype, channelid=i)

# write frame to file
io_gwf.write_frames(outfile, [frame], compression=compression,
                    compression_level=compression_level)
def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0, compression=257, compression_level=6)
Write data to a GWF file using the frameCPP API
3.88862
3.680876
1.056439
# pylint: disable=redefined-builtin

if timeseries.channel:
    channel = str(timeseries.channel)
else:
    channel = str(timeseries.name)

offset = float(LIGOTimeGPS(timeseries.t0.value)
               - LIGOTimeGPS(*frame.GetGTime()))

# create the data container
if type.lower() == 'adc':
    frdata = frameCPP.FrAdcData(
        channel,
        0,  # channel group
        channelid,  # channel number in group
        16,  # number of bits in ADC
        timeseries.sample_rate.value,  # sample rate
    )
    frdata.SetTimeOffset(offset)
    append = frame.AppendFrAdcData
elif type.lower() == 'proc':
    frdata = frameCPP.FrProcData(
        channel,  # channel name
        str(timeseries.name),  # comment
        frameCPP.FrProcData.TIME_SERIES,  # ID as time-series
        frameCPP.FrProcData.UNKNOWN_SUB_TYPE,  # empty sub-type (fseries)
        offset,  # offset of first sample relative to frame start
        abs(timeseries.span),  # duration of data
        0.,  # heterodyne frequency
        0.,  # phase of heterodyne
        0.,  # frequency range
        0.,  # resolution bandwidth
    )
    append = frame.AppendFrProcData
elif type.lower() == 'sim':
    frdata = frameCPP.FrSimData(
        str(timeseries.channel),  # channel name
        str(timeseries.name),  # comment
        timeseries.sample_rate.value,  # sample rate
        offset,  # time offset of first sample
        0.,  # heterodyne frequency
        0.,  # phase of heterodyne
    )
    append = frame.AppendFrSimData
else:
    raise RuntimeError("Invalid channel type {!r}, please select one of "
                       "'adc', 'proc', or 'sim'".format(type))

# append an FrVect
frdata.AppendData(create_frvect(timeseries))
append(frdata)
def append_to_frame(frame, timeseries, type='proc', channelid=0)
Append data from a `TimeSeries` to a `~frameCPP.FrameH` Parameters ---------- frame : `~frameCPP.FrameH` frame object to append to timeseries : `TimeSeries` the timeseries to append type : `str` the type of the channel, one of 'adc', 'proc', 'sim' channelid : `int`, optional the ID of the channel within the group (only used for ADC channels)
4.283941
4.084064
1.048941
# create timing dimension
dims = frameCPP.Dimension(
    timeseries.size, timeseries.dx.value, str(timeseries.dx.unit), 0)

# create FrVect
vect = frameCPP.FrVect(
    timeseries.name or '', FRVECT_TYPE_FROM_NUMPY[timeseries.dtype.type],
    1, dims, str(timeseries.unit))

# populate FrVect and return
vect.GetDataArray()[:] = numpy.require(timeseries.value,
                                       requirements=['C'])
return vect
def create_frvect(timeseries)
Create a `~frameCPP.FrVect` from a `TimeSeries` This method is primarily designed to make writing data to GWF files a bit easier. Parameters ---------- timeseries : `TimeSeries` the input `TimeSeries` Returns ------- frvect : `~frameCPP.FrVect` the output `FrVect`
8.771232
7.248174
1.21013
array = iter(array)
i = 0
while True:
    try:  # get next value
        val = next(array)
    except StopIteration:  # end of array
        return
    if val:  # start of new segment
        n = 1  # count consecutive True
        try:
            while next(array):  # run until segment will end
                n += 1
        except StopIteration:  # have reached the end
            return  # stop
        finally:  # yield segment (including at StopIteration)
            if n >= minlen:  # ... if long enough
                yield (start + i * delta, start + (i + n) * delta)
            i += n
    i += 1
def _bool_segments(array, start=0, delta=1, minlen=1)
Yield segments of consecutive `True` values in a boolean array Parameters ---------- array : `iterable` An iterable of boolean-castable values. start : `float` The value of the first sample on the indexed axis (e.g. the GPS start time of the array). delta : `float` The step size on the indexed axis (e.g. sample duration). minlen : `int`, optional The minimum number of consecutive `True` values for a segment. Yields ------ segment : `tuple` ``(start + i * delta, start + (i + n) * delta)`` for a sequence of ``n`` consecutive True values starting at position ``i``. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. The datatype of the values returned will be the larger of the types of ``start`` and ``delta``. Examples -------- >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]))) [(1, 2), (5, 8), (9, 10)] >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1], ... start=100., delta=0.1))) [(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)]
4.74381
4.777408
0.992967
from ..segments import DataQualityFlag

# format dtype
if dtype is None:
    dtype = self.t0.dtype
if isinstance(dtype, numpy.dtype):  # use callable dtype
    dtype = dtype.type
start = dtype(self.t0.value)
dt = dtype(self.dt.value)

# build segmentlists (can use simple objects since DQFlag converts)
active = _bool_segments(self.value, start, dt, minlen=int(minlen))
known = [tuple(map(dtype, self.span))]

# build flag and return
out = DataQualityFlag(name=name or self.name, active=active,
                      known=known, label=label or self.name,
                      description=description)
if round:
    return out.round()
return out
def to_dqflag(self, name=None, minlen=1, dtype=None, round=False, label=None, description=None)
Convert this series into a `~gwpy.segments.DataQualityFlag`. Each contiguous set of `True` values are grouped as a `~gwpy.segments.Segment` running from the GPS time of the first found `True`, to the GPS time of the next `False` (or the end of the series) Parameters ---------- minlen : `int`, optional minimum number of consecutive `True` values to identify as a `~gwpy.segments.Segment`. This is useful to ignore single bit flips, for example. dtype : `type`, `callable` output segment entry type, can pass either a type for simple casting, or a callable function that accepts a float and returns another numeric type, defaults to the `dtype` of the time index round : `bool`, optional choose to round each `~gwpy.segments.Segment` to its inclusive integer boundaries label : `str`, optional the :attr:`~gwpy.segments.DataQualityFlag.label` for the output flag. description : `str`, optional the :attr:`~gwpy.segments.DataQualityFlag.description` for the output flag. Returns ------- dqflag : `~gwpy.segments.DataQualityFlag` a segment representation of this `StateTimeSeries`, the span defines the `known` segments, while the contiguous `True` sets define each of the `active` segments
6.630913
5.818784
1.13957
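A hedged usage sketch for to_dqflag, assuming state is a boolean StateTimeSeries (the variable and flag names are illustrative):

    flag = state.to_dqflag(name='X1:SCIENCE', minlen=16, round=True)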
try:
    return self._bits
except AttributeError:
    if self.dtype.name.startswith(('uint', 'int')):
        nbits = self.itemsize * 8
        self.bits = Bits(['Bit %d' % b for b in range(nbits)],
                         channel=self.channel, epoch=self.epoch)
        return self.bits
    elif hasattr(self.channel, 'bits'):
        self.bits = self.channel.bits
        return self.bits
    return None
def bits(self)
list of `Bits` for this `StateVector` :type: `Bits`
4.192141
4.209911
0.995779
try:
    return self._boolean
except AttributeError:
    nbits = len(self.bits)
    boolean = numpy.zeros((self.size, nbits), dtype=bool)
    for i, sample in enumerate(self.value):
        boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)]
    self._boolean = Array2D(boolean, name=self.name, x0=self.x0,
                            dx=self.dx, y0=0, dy=1)
    return self.boolean
def boolean(self)
A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point.
3.64503
3.469272
1.050661
if bits is None:
    bits = [b for b in self.bits if b not in {None, ''}]
bindex = []
for bit in bits:
    try:
        bindex.append((self.bits.index(bit), bit))
    except (IndexError, ValueError) as exc:
        exc.args = ('Bit %r not found in StateVector' % bit,)
        raise
self._bitseries = StateTimeSeriesDict()
for i, bit in bindex:
    self._bitseries[bit] = StateTimeSeries(
        self.value >> i & 1, name=bit, epoch=self.x0.value,
        channel=self.channel, sample_rate=self.sample_rate)
return self._bitseries
def get_bit_series(self, bits=None)
Get the `StateTimeSeries` for each bit of this `StateVector`. Parameters ---------- bits : `list`, optional a list of bit indices or bit names, defaults to all bits Returns ------- bitseries : `StateTimeSeriesDict` a `dict` of `StateTimeSeries`, one for each given bit
4.034274
3.451747
1.168763
return super(StateVector, cls).read(source, *args, **kwargs)
def read(cls, source, *args, **kwargs)
Read data into a `StateVector` Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, defaults to end of data found; any input parseable by `~gwpy.time.to_gps` is fine bits : `list`, optional list of bits names for this `StateVector`, give `None` at any point in the list to mask that bit format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. nproc : `int`, optional, default: `1` number of parallel processes to use, serial process by default. gap : `str`, optional how to handle gaps in the cache, one of - 'ignore': do nothing, let the underlying reader method handle it - 'warn': do nothing except print a warning to the screen - 'raise': raise an exception upon finding a gap (default) - 'pad': insert a value to fill the gaps pad : `float`, optional value with which to fill gaps in the source data, only used if gap is not given, or `gap='pad'` is given Examples -------- To read the S6 state vector, with names for all the bits:: >>> sv = StateVector.read( 'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR', bits=['Science mode', 'Conlog OK', 'Locked', 'No injections', 'No Excitations'], dtype='uint32') then you can convert these to segments >>> segments = sv.to_dqflags() or to read just the interferometer operations bits:: >>> sv = StateVector.read( 'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR', bits=['Science mode', None, 'Locked'], dtype='uint32') Running `to_dqflags` on this example would only give 2 flags, rather than all five. Alternatively the `bits` attribute can be reset after reading, but before any further operations. Notes -----
5.982738
7.58818
0.788429
from ..segments import DataQualityDict
out = DataQualityDict()
bitseries = self.get_bit_series(bits=bits)
for bit, sts in bitseries.items():
    out[bit] = sts.to_dqflag(
        name=bit, minlen=minlen, round=round, dtype=dtype,
        description=self.bits.description[bit])
return out
def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False)
Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters ---------- minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` representations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits
4.985367
4.251984
1.17248
new = cls.DictClass.fetch(
    [channel], start, end, host=host, port=port,
    verbose=verbose, connection=connection)[channel]
if bits:
    new.bits = bits
return new
def fetch(cls, channel, start, end, bits=None, host=None, port=None, verbose=False, connection=None, type=Nds2ChannelType.any())
Fetch data from NDS into a `StateVector`. Parameters ---------- channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` host : `str`, optional URL of NDS server to use, defaults to observatory site host port : `int`, optional port number for NDS server query, must be given with `host` verify : `bool`, optional, default: `True` check channels exist in database before asking for data connection : `nds2.connection` open NDS connection to use verbose : `bool`, optional print verbose output about NDS progress type : `int`, optional NDS2 channel type integer dtype : `type`, `numpy.dtype`, `str`, optional identifier for desired output data type
5.071735
9.261074
0.54764
new = cls.DictClass.get([channel], start, end, **kwargs)[channel]
if bits:
    new.bits = bits
return new
def get(cls, channel, start, end, bits=None, **kwargs)
Get data for this channel from frames or NDS Parameters ---------- channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` pad : `float`, optional value with which to fill gaps in the source data, only used if gap is not given, or ``gap='pad'`` is given dtype : `numpy.dtype`, `str`, `type`, or `dict` numeric data type for returned data, e.g. `numpy.float`, or `dict` of (`channel`, `dtype`) pairs nproc : `int`, optional, default: `1` number of parallel processes to use, serial process by default. verbose : `bool`, optional print verbose output about NDS progress. **kwargs other keyword arguments to pass to either :meth:`.find` (for direct GWF file access) or :meth:`.fetch` for remote NDS2 access See Also -------- StateVector.fetch for grabbing data from a remote NDS2 server StateVector.find for discovering and reading data from local GWF files
7.396625
21.463955
0.344607
if format == 'timeseries':
    return super(StateVector, self).plot(**kwargs)
if format == 'segments':
    from ..plot import Plot
    kwargs.setdefault('xscale', 'auto-gps')
    return Plot(*self.to_dqflags(bits=bits).values(),
                projection='segments', **kwargs)
raise ValueError("'format' argument must be one of: 'timeseries' or "
                 "'segments'")
def plot(self, format='segments', bits=None, **kwargs)
Plot the data for this `StateVector` Parameters ---------- format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag.
7.264461
7.315366
0.993041
rate1 = self.sample_rate.value
if isinstance(rate, units.Quantity):
    rate2 = rate.value
else:
    rate2 = float(rate)

# upsample
if (rate2 / rate1).is_integer():
    raise NotImplementedError("StateVector upsampling has not "
                              "been implemented yet, sorry.")

# downsample
elif (rate1 / rate2).is_integer():
    factor = int(rate1 / rate2)
    # reshape incoming data to one column per new sample
    newsize = int(self.size / factor)
    old = self.value.reshape((newsize, self.size // newsize))
    # work out number of bits
    if self.bits:
        nbits = len(self.bits)
    else:
        max_ = self.value.max()
        nbits = int(ceil(log(max_, 2))) if max_ else 1
    bits = range(nbits)
    # construct an iterator over the columns of the old array
    itr = numpy.nditer(
        [old, None],
        flags=['external_loop', 'reduce_ok'],
        op_axes=[None, [0, -1]],
        op_flags=[['readonly'], ['readwrite', 'allocate']])
    dtype = self.dtype
    type_ = self.dtype.type
    # for each new sample, each bit is the logical AND of whether
    # that bit is ON in all of the original samples
    for x, y in itr:
        y[...] = numpy.sum([type_((x >> bit & 1).all() * (2 ** bit))
                            for bit in bits], dtype=self.dtype)
    new = StateVector(itr.operands[1], dtype=dtype)
    new.__metadata_finalize__(self)
    new._unit = self.unit
    new.sample_rate = rate2
    return new

# error for non-integer resampling factors
elif rate1 < rate2:
    raise ValueError("New sample rate must be multiple of input "
                     "series rate if upsampling a StateVector")
else:
    raise ValueError("New sample rate must be divisor of input "
                     "series rate if downsampling a StateVector")
def resample(self, rate)
Resample this `StateVector` to a new rate Because of the nature of a state-vector, downsampling is done by taking the logical 'and' of all original samples in each new sampling interval, while upsampling is achieved by repeating samples. Parameters ---------- rate : `float` rate to which to resample this `StateVector`, must be a divisor of the original sample rate (when downsampling) or a multiple of the original (when upsampling). Returns ------- vector : `StateVector` resampled version of the input `StateVector`
4.932497
4.650179
1.060711
# pylint: disable=redefined-builtin

def combiner(listofseglists):
    out = cls(seg for seglist in listofseglists for seg in seglist)
    if coalesce:
        return out.coalesce()
    return out

return io_read_multi(combiner, cls, source, format=format, **kwargs)
def read(cls, source, format=None, coalesce=False, **kwargs)
Read segments from file into a `SegmentList` Parameters ---------- source : `str` path of file to read format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coalesce : `bool`, optional if `True` coalesce the segment list before returning, otherwise return exactly as contained in file(s). **kwargs other keyword arguments depend on the format, see the online documentation for details (:ref:`gwpy-segments-io`) Returns ------- segmentlist : `SegmentList` `SegmentList` active and known segments read from file. Notes -----
5.322148
7.039732
0.756016
return io_registry.write(self, target, *args, **kwargs)
def write(self, target, *args, **kwargs)
Write this `SegmentList` to a file Arguments and keywords depend on the output format, see the online documentation for full details for each format. Parameters ---------- target : `str` output filename Notes -----
6.673969
13.615602
0.490171
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs)
def table_from_cwb(source, *args, **kwargs)
Read an `EventTable` from a Coherent WaveBurst ROOT file This function just redirects to the format='root' reader with appropriate defaults.
21.447979
8.365699
2.5638
re_name_def = re.compile(
    r'^\s*#\s+'  # whitespace and comment marker
    r'(?P<colnumber>[0-9]+)\s+-\s+'  # number of column
    r'(?P<colname>(.*))'
)
self.names = []
include_cuts = False
for line in lines:
    if not line:  # ignore empty lines in header (windows)
        continue
    if not line.startswith('# '):  # end of header lines
        break
    if line.startswith('# -/+'):
        include_cuts = True
    else:
        match = re_name_def.search(line)
        if match:
            self.names.append(match.group('colname').rstrip())
if not self.names:
    raise core.InconsistentTableError(
        'No column names found in cWB header')
if include_cuts:
    self.cols = [  # pylint: disable=attribute-defined-outside-init
        core.Column(name='selection cut 1'),
        core.Column(name='selection cut 2'),
    ]
else:
    self.cols = []  # pylint: disable=attribute-defined-outside-init
for name in self.names:
    col = core.Column(name=name)
    self.cols.append(col)
def get_cols(self, lines)
Initialize Column objects from a multi-line ASCII header Parameters ---------- lines : `list` List of table lines
3.465862
3.474288
0.997575
if name is None:
    name = get_backend()
backend_name = (name[9:] if name.startswith("module://")
                else "matplotlib.backends.backend_{}".format(name.lower()))
return importlib.import_module(backend_name)
def get_backend_mod(name=None)
Returns the imported module for the given backend name Parameters ---------- name : `str`, optional the name of the backend, defaults to the current backend. Returns ------- backend_mod: `module` the module as returned by :func:`importlib.import_module` Examples -------- >>> from gwpy.plot.plot import get_backend_mod >>> print(get_backend_mod('agg')) <module 'matplotlib.backends.backend_agg' from ... >
4.018173
4.163994
0.964981
# determine auto-separation if separate is None and inputs: # if given a nested list of data, multiple axes are required if any(isinstance(x, iterable_types + (dict,)) for x in inputs): separate = True # if data are of different types, default to separate elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721 separate = True # build list of lists out = [] for x in inputs: if isinstance(x, dict): # unwrap dict x = list(x.values()) # new group from iterable, notes: # the iterable is presumed to be a list of independent data # structures, unless its a list of scalars in which case we # should plot them all as one if ( isinstance(x, (KeysView, ValuesView)) or isinstance(x, (list, tuple)) and ( not x or not numpy.isscalar(x[0])) ): out.append(x) # dataset starts a new group elif separate or not out: out.append([x]) # dataset joins current group else: # append input to most recent group out[-1].append(x) if flat: return [s for group in out for s in group] return out
def _group_axes_data(inputs, separate=None, flat=False)
Determine the number of axes from the input args to this `Plot`

Parameters
----------
inputs : `list` of array-like data sets
    A list of data arrays, or a list of lists of data sets

separate : `bool`, optional
    Plot each set of data on a separate `Axes`

flat : `bool`, optional
    Return a flattened list of data objects

Returns
-------
axesdata : `list` of lists of array-like data
    A `list` with one element per required `Axes` containing the
    array-like data sets for those `Axes`, unless ``flat=True``
    is given.

Notes
-----
The logic for this method is as follows:

- if a `list` of data arrays are given, and `separate=False`,
  use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`,
  use N `Axes`, one for each data array
- if a nested `list` of data arrays are given, ignore `separate`
  and use one `Axes` for each group of arrays.

Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]]
5.49665
5.788068
0.949652
if isinstance(sharex, bool): sharex = "all" if sharex else "none" if isinstance(sharey, bool): sharey = "all" if sharey else "none" # parse keywords axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if key in kwargs} # handle geometry and group axes if geometry is not None and geometry[0] * geometry[1] == len(data): separate = True axes_groups = _group_axes_data(data, separate=separate) if geometry is None: geometry = (len(axes_groups), 1) nrows, ncols = geometry if axes_groups and nrows * ncols != len(axes_groups): # mismatching data and geometry raise ValueError("cannot group data into {0} axes with a " "{1}x{2} grid".format(len(axes_groups), nrows, ncols)) # create grid spec gs = GridSpec(nrows, ncols) axarr = numpy.empty((nrows, ncols), dtype=object) # set default labels defxlabel = 'xlabel' not in axes_kw defylabel = 'ylabel' not in axes_kw flatdata = [s for group in axes_groups for s in group] for axis in ('x', 'y'): unit = _common_axis_unit(flatdata, axis=axis) if unit: axes_kw.setdefault('{}label'.format(axis), unit.to_string('latex_inline_dimensional')) # create axes for each group and draw each data object for group, (row, col) in zip_longest( axes_groups, itertools.product(range(nrows), range(ncols)), fillvalue=[]): # create Axes shared_with = {"none": None, "all": axarr[0, 0], "row": axarr[row, 0], "col": axarr[0, col]} axes_kw["sharex"] = shared_with[sharex] axes_kw["sharey"] = shared_with[sharey] axes_kw['xscale'] = xscale if xscale else _parse_xscale(group) ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw) # plot data plot_func = getattr(ax, method) if method in ('imshow', 'pcolormesh'): for obj in group: plot_func(obj, **kwargs) elif group: plot_func(*group, **kwargs) # set default axis labels for axis, share, pos, n, def_ in ( (ax.xaxis, sharex, row, nrows, defxlabel), (ax.yaxis, sharey, col, ncols, defylabel), ): # hide label if shared axis and not bottom left panel if share == 'all' and pos < n - 1: axis.set_label_text('') # otherwise set default status else: axis.isDefault_label = def_ return self.axes
def _init_axes(self, data, method='plot', xscale=None, sharex=False, sharey=False, geometry=None, separate=None, **kwargs)
Populate this figure with data, creating `Axes` as necessary
3.266866
3.274303
0.997729
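As a sketch of how these keywords combine at the user level (``ts1`` and ``ts2`` are hypothetical data series):

    >>> from gwpy.plot import Plot
    >>> plot = Plot(ts1, ts2, separate=True, geometry=(2, 1), sharex=True)

Here the two series land on separate `Axes` stacked in a 2x1 grid, with a shared X-axis.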
for cbar in self.colorbars: cbar.draw_all() self.canvas.draw()
def refresh(self)
Refresh the current figure
6.366663
4.910529
1.296533
# this method tries to reproduce the functionality of pyplot.show,
# mainly for user convenience. However, as of matplotlib-3.0.0,
# pyplot.show() ends up calling _back_ to Plot.show(),
# so we have to be careful not to end up in a recursive loop
#
# Developer note: if we ever pin to matplotlib >=3.0.0,
# this method can likely be removed entirely
#
import inspect
try:
    callframe = inspect.currentframe().f_back
except AttributeError:
    pass
else:
    if 'matplotlib' in callframe.f_code.co_filename:
        block = False

# render
super(Plot, self).show(warn=warn)

# don't block on ipython with interactive backends
if block is None and interactive_backend():
    block = not IPYTHON

# block in GUI loop (stolen from mpl.backend_bases._Backend.show)
if block:
    backend_mod = get_backend_mod()
    try:
        backend_mod.Show().mainloop()
    except AttributeError:  # matplotlib < 2.1.0
        backend_mod.show.mainloop()
def show(self, block=None, warn=True)
Display the current figure (if possible). If blocking, this method replicates the behaviour of :func:`matplotlib.pyplot.show()`, otherwise it just calls up to :meth:`~matplotlib.figure.Figure.show`. This method also supports repeatedly showing the same figure, even after closing the display window, which isn't supported by `pyplot.show` (AFAIK). Parameters ---------- block : `bool`, optional open the figure and block until the figure is closed, otherwise open the figure as a detached window, default: `None`. If `None`, block if using an interactive backend and _not_ inside IPython. warn : `bool`, optional print a warning if matplotlib is not running in an interactive backend and cannot display the figure, default: `True`.
6.860847
6.612388
1.037575
from matplotlib.pyplot import close for ax in self.axes[::-1]: # avoid matplotlib/matplotlib#9970 ax.set_xscale('linear') ax.set_yscale('linear') # clear the axes ax.cla() # close the figure close(self)
def close(self)
Close the plot and release its memory.
5.326178
4.73083
1.125844
if projection is None: return self.axes return [ax for ax in self.axes if ax.name == projection.lower()]
def get_axes(self, projection=None)
Find all `Axes`, optionally matching the given projection Parameters ---------- projection : `str` name of axes types to return Returns ------- axlist : `list` of `~matplotlib.axes.Axes`
3.856565
4.624803
0.833887
# pre-process kwargs mappable, kwargs = gcbar.process_colorbar_kwargs( self, mappable, ax, cax=cax, fraction=fraction, **kwargs) # generate colour bar cbar = super(Plot, self).colorbar(mappable, **kwargs) self.colorbars.append(cbar) if label: # mpl<1.3 doesn't accept label in Colorbar constructor cbar.set_label(label) # update mappables for this axis if emit: ax = kwargs.pop('ax') norm = mappable.norm cmap = mappable.get_cmap() for map_ in ax.collections + ax.images: map_.set_norm(norm) map_.set_cmap(cmap) return cbar
def colorbar(self, mappable=None, cax=None, ax=None, fraction=0., label=None, emit=True, **kwargs)
Add a colorbar to the current `Plot`

A colorbar must be associated with an `Axes` on this `Plot`,
and an existing mappable element (e.g. an image).

Parameters
----------
mappable : matplotlib data collection
    Collection against which to map the colouring

cax : `~matplotlib.axes.Axes`
    Axes on which to draw colorbar

ax : `~matplotlib.axes.Axes`
    Axes relative to which to position colorbar

fraction : `float`, optional
    Fraction of original axes to use for colorbar, give
    `fraction=0` to not resize the original axes at all.

label : `str`, optional
    label for the colorbar

emit : `bool`, optional
    If `True` update all mappables on `Axes` to match the same
    colouring as the colorbar.

**kwargs
    other keyword arguments to be passed to the
    :meth:`~matplotlib.figure.Figure.colorbar`

Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
    the newly added `Colorbar`

See Also
--------
matplotlib.figure.Figure.colorbar
matplotlib.colorbar.Colorbar

Examples
--------
>>> import numpy
>>> from gwpy.plot import Plot

To plot a simple image and add a colorbar:

>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> plot.colorbar(label='Value')
>>> plot.show()

Colorbars can also be generated by directly referencing the parent
axes:

>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> ax.colorbar(label='Value')
>>> plot.show()
4.24789
4.954125
0.857445
warnings.warn(
    "{0}.add_colorbar was renamed {0}.colorbar, this warning will "
    "result in an error in the future".format(type(self).__name__),
    DeprecationWarning)
return self.colorbar(*args, **kwargs)
def add_colorbar(self, *args, **kwargs)
DEPRECATED, use `Plot.colorbar` instead
5.392192
4.659177
1.157327
# get axes to anchor against
if not ax:
    ax = self.gca()

# set options for new axes
axes_kw = {
    'pad': pad,
    'add_to_figure': True,
    'sharex': ax if sharex is True else sharex or None,
    'axes_class': get_projection_class('segments'),
}

# map X-axis limit from old axes
if axes_kw['sharex'] is ax and not ax.get_autoscalex_on():
    axes_kw['xlim'] = ax.get_xlim()

# if axes uses GPS scaling, copy the epoch as well
try:
    axes_kw['epoch'] = ax.get_epoch()
except AttributeError:
    pass

# add new axes
if ax.get_axes_locator():
    divider = ax.get_axes_locator()._axes_divider
else:
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    divider = make_axes_locatable(ax)
if location not in {'top', 'bottom'}:
    raise ValueError("Segments can only be positioned at 'top' or "
                     "'bottom'.")
segax = divider.append_axes(location, height, **axes_kw)

# update anchor axes
if axes_kw['sharex'] is ax and location == 'bottom':
    # map label
    segax.set_xlabel(ax.get_xlabel())
    segax.xaxis.isDefault_label = ax.xaxis.isDefault_label
    ax.set_xlabel("")
    # hide ticks on original axes
    setp(ax.get_xticklabels(), visible=False)

# plot segments
segax.plot(segments, **plotargs)
segax.grid(b=False, which='both', axis='y')
segax.autoscale(axis='y', tight=True)

return segax
def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1, sharex=True, location='bottom', **plotargs)
Add a segment bar `Plot` indicating state information.

By default, segments are displayed in a thin horizontal set of
Axes sitting immediately below the x-axis of the main Axes,
similar to a colorbar.

Parameters
----------
segments : `~gwpy.segments.DataQualityFlag`
    A data-quality flag, or `SegmentList` denoting state segments
    about this Plot

ax : `Axes`, optional
    Specific `Axes` relative to which to position new `Axes`,
    defaults to :func:`~matplotlib.pyplot.gca()`

height : `float`, optional
    Height of the new axes, as a fraction of the anchor axes

pad : `float`, optional
    Padding between the new axes and the anchor, as a fraction of
    the anchor axes dimension

sharex : `True`, `~matplotlib.axes.Axes`, optional
    Either `True` to set ``sharex=ax`` for the new segment axes,
    or an `Axes` to use directly

location : `str`, optional
    Location for new segment axes, defaults to ``'bottom'``,
    acceptable values are ``'top'`` or ``'bottom'``.

**plotargs
    extra keyword arguments are passed to
    :meth:`~gwpy.plot.SegmentAxes.plot`
4.249253
3.977894
1.068217
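A usage sketch (``ts`` and ``flag`` are hypothetical `TimeSeries` and `~gwpy.segments.DataQualityFlag` objects):

    >>> from gwpy.plot import Plot
    >>> plot = Plot(ts)
    >>> plot.add_segments_bar(flag, location='bottom')
    >>> plot.show()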
warnings.warn('add_state_segments() was renamed add_segments_bar(), ' 'this warning will result in an error in the future', DeprecationWarning) return self.add_segments_bar(*args, **kwargs)
def add_state_segments(self, *args, **kwargs)
DEPRECATED: use :meth:`Plot.add_segments_bar`
5.344138
3.927795
1.360595
# connect if needed
if connection is None:
    if gps is None:
        gps = from_gps('now')
    if db is None:
        db = get_database_names(gps, gps)[0]
    connection = connect(db=db, **connectkwargs)
# query
out = query("select channel from job where monitorName = 'chacr'",
            connection=connection)
return [r[0] for r in out]
def get_hacr_channels(db=None, gps=None, connection=None, **connectkwargs)
Return the names of all channels present in the given HACR database
6.186802
5.903428
1.048002
if columns is None:
    columns = HACR_COLUMNS
columns = list(columns)
span = Segment(*map(to_gps, (start, end)))

# parse selection for SQL query (removing leading 'where ')
selectionstr = 'and %s' % format_db_selection(selection, engine=None)[6:]

# get database names and loop over each one
databases = get_database_names(start, end)
rows = []
for db in databases:
    conn = connect(db, **connectkwargs)
    cursor = conn.cursor()
    # find process ID(s) for this channel
    pids = query("select process_id, gps_start, gps_stop "
                 "from job where monitorName = %r and channel = %r"
                 % (monitor, str(channel)), connection=conn)
    for p, s, e in pids:
        # validate this process id
        if pid is not None and int(p) != int(pid):
            continue
        tspan = Segment(float(s), float(e))
        if not tspan.intersects(span):
            continue
        # execute trigger query
        q = ('select %s from mhacr where process_id = %d and '
             'gps_start > %s and gps_start < %d %s order by gps_start asc'
             % (', '.join(columns), int(p), span[0], span[1], selectionstr))
        n = cursor.execute(q)
        if n == 0:
            continue
        # get new events, convert to recarray, and append to table
        rows.extend(cursor.fetchall())
return EventTable(rows=rows, names=columns)
def get_hacr_triggers(channel, start, end, columns=HACR_COLUMNS, pid=None, monitor='chacr', selection=None, **connectkwargs)
Fetch a table of HACR triggers in the given interval
4.75491
4.778098
0.995147
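A usage sketch (channel name, interval, and column selection are illustrative; this requires access to the HACR database server):

    >>> trigs = get_hacr_triggers('G1:DER_DATA_H', 'Jan 1 2015', 'Jan 2 2015',
    ...                           columns=['gps_start', 'snr'])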
try: import pymysql except ImportError as e: e.args = ('pymysql is required to fetch HACR triggers',) raise return pymysql.connect(host=host, user=user, passwd=passwd, db=db)
def connect(db, host=HACR_DATABASE_SERVER, user=HACR_DATABASE_USER, passwd=HACR_DATABASE_PASSWD)
Connect to the given SQL database
3.58639
3.807062
0.942036
if connection is None: connection = connect(**connectkwargs) cursor = connection.cursor() cursor.execute(querystr) return cursor.fetchall()
def query(querystr, connection=None, **connectkwargs)
Execute a query of the given SQL database
2.200898
2.220838
0.991022
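A usage sketch chaining `connect` and `query` (the database name is hypothetical):

    >>> conn = connect('hacr_db')
    >>> rows = query('select distinct channel from job', connection=conn)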
if not analog:
    if not sample_rate:
        raise ValueError("Must give sample_rate frequency to display "
                         "digital (analog=False) filter")
    sample_rate = Quantity(sample_rate, 'Hz').value
    dt = 2 * pi / sample_rate
    if not isinstance(frequencies, (type(None), int)):
        frequencies = numpy.atleast_1d(frequencies).copy()
        frequencies *= dt

# parse filter (without digital conversions)
_, fcomp = parse_filter(filter_, analog=False)
if analog:
    lti = signal.lti(*fcomp)
else:
    lti = signal.dlti(*fcomp, dt=dt)

# calculate frequency response
w, mag, phase = lti.bode(w=frequencies)

# convert from decibels (bode returns magnitude as 20 * log10(|H|))
if not dB:
    mag = 10 ** (mag / 20.)

# draw
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline
def add_filter(self, filter_, frequencies=None, dB=True, analog=False, sample_rate=None, **kwargs)
Add a linear time-invariant filter to this BodePlot

Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
    the filter to plot, either as a `~scipy.signal.lti`, or a
    `tuple` with the following number and meaning of elements

         - 2: (numerator, denominator)
         - 3: (zeros, poles, gain)
         - 4: (A, B, C, D)

frequencies : `numpy.ndarray`, optional
    list of frequencies (in Hertz) at which to plot

dB : `bool`, optional
    if `True`, display magnitude in decibels, otherwise display
    amplitude, default: `True`

analog : `bool`, optional
    if `True`, treat ``filter_`` as a continuous-time (analogue)
    filter, otherwise as a digital filter, default: `False`

sample_rate : `float`, optional
    sample rate (Hertz) of the digital filter, required if
    ``analog=False``

**kwargs
    any other keyword arguments accepted by
    :meth:`~matplotlib.axes.Axes.plot`

Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
    the lines drawn for the magnitude and phase of the filter.
4.682327
4.188264
1.117964
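A sketch plotting the response of a simple analogue filter designed with `scipy.signal` (filter order and cutoff are illustrative):

    >>> from scipy import signal
    >>> from gwpy.plot import BodePlot
    >>> # 4th-order Butterworth low-pass, 100 rad/s cutoff
    >>> zpk = signal.butter(4, 100, 'lowpass', analog=True, output='zpk')
    >>> plot = BodePlot()
    >>> mag, phase = plot.add_filter(zpk, analog=True)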
# parse spectrum arguments kwargs.setdefault('label', spectrum.name) # get magnitude mag = numpy.absolute(spectrum.value) if dB: mag = to_db(mag) if not power: mag *= 2. # get phase phase = numpy.angle(spectrum.value, deg=True) # plot w = spectrum.frequencies.value mline = self.maxes.plot(w, mag, **kwargs)[0] pline = self.paxes.plot(w, phase, **kwargs)[0] return mline, pline
def add_frequencyseries(self, spectrum, dB=True, power=False, **kwargs)
Plot the magnitude and phase of a complex-valued `FrequencySeries`

Parameters
----------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
    the (complex-valued) `FrequencySeries` to display

dB : `bool`, optional, default: `True`
    if `True`, display magnitude in decibels, otherwise display
    amplitude.

power : `bool`, optional, default: `False`
    give `True` to indicate that ``spectrum`` holds power values,
    so ``dB = 10 * log(abs(spectrum))``, otherwise
    ``dB = 20 * log(abs(spectrum))``. This argument is ignored if
    ``dB=False``.

**kwargs
    any other keyword arguments accepted by
    :meth:`~matplotlib.axes.Axes.plot`

Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
    the lines drawn for the magnitude and phase of the spectrum.
4.033703
3.680231
1.096046
out = ChannelList() append = out.append if isinstance(source, FILE_LIKE): close = False else: source = open(source, 'r') close = True try: section = None while True: try: line = next(source) except StopIteration: break if line == '' or line == '\n' or line.startswith('#'): continue elif line.startswith('['): section = line[1:-2] elif line.startswith('{'): append(parse_omega_channel(source, section)) else: raise RuntimeError("Failed to parse Omega config line:\n%s" % line) finally: if close: source.close() return out
def read_omega_scan_config(source)
Parse an Omega-scan configuration file into a `ChannelList` Parameters ---------- source : `str` path of Omega configuration file to parse Returns ------- channels : `ChannelList` the list of channels (in order) as parsed Raises ------ RuntimeError if this method finds a line it cannot parse sensibly
3.090671
3.232265
0.956194
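For reference, the file format being parsed looks roughly like this (channel parameters illustrative): a ``[section]`` header, then one ``{ ... }`` block per channel, each handed off to `parse_omega_channel`:

    [Gravitational,Gravitational wave data]

    {
      channelName:             'G1:DER_DATA_H'
      frameType:               'G1_RDS_C01_L3'
      sampleFrequency:         16384
      searchFrequencyRange:    [16 8192]
    }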
params = OrderedDict()
while True:
    line = next(fobj)
    if line == '}\n':
        break
    key, value = line.split(':', 1)
    params[key.strip()] = omega_param(value)
out = Channel(params.get('channelName'),
              sample_rate=params.get('sampleFrequency'),
              frametype=params.get('frameType'),
              frequency_range=params.get('searchFrequencyRange'))
out.group = section
out.params = params
return out
def parse_omega_channel(fobj, section=None)
Parse a `Channel` from an Omega-scan configuration file Parameters ---------- fobj : `file` the open file-like object to parse section : `str` name of section in which this channel should be recorded Returns ------- channel : `Channel` the channel as parsed from this `file`
4.476224
5.127945
0.872908
val = val.strip()
if val.startswith(('"', "'")):
    return str(val[1:-1])
if val.startswith('['):
    return tuple(map(float, val[1:-1].split()))
return float(val)
def omega_param(val)
Parse a value from an Omega-scan configuration file This method tries to parse matlab-syntax parameters into a `str`, `float`, or `tuple`
3.868223
3.5919
1.07693
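The three branches map directly onto usage:

    >>> omega_param("'G1:DER_DATA_H'")
    'G1:DER_DATA_H'
    >>> omega_param('[16 8192]')
    (16.0, 8192.0)
    >>> omega_param('16384')
    16384.0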
if isinstance(fobj, FILE_LIKE): close = False else: fobj = open(fobj, 'w') close = True try: # print header if header: print('# Q Scan configuration file', file=fobj) print('# Generated with GWpy from a ChannelList', file=fobj) group = None for channel in channellist: # print header if channel.group != group: group = channel.group print('\n[%s]' % group, file=fobj) print("", file=fobj) print_omega_channel(channel, file=fobj) finally: if close: fobj.close()
def write_omega_scan_config(channellist, fobj, header=True)
Write a `ChannelList` to an Omega-pipeline scan configuration file This method is dumb and assumes the channels are sorted in the right order already
3.816325
3.781779
1.009135
print('{', file=file) try: params = channel.params.copy() except AttributeError: params = OrderedDict() params.setdefault('channelName', str(channel)) params.setdefault('alwaysPlotFlag', int(params.pop('important', False))) if channel.frametype: params.setdefault('frameType', channel.frametype) if channel.sample_rate is not None: params.setdefault('sampleFrequency', channel.sample_rate.to('Hz').value) if channel.frequency_range is not None: low, high = channel.frequency_range.to('Hz').value params.setdefault('searchFrequencyRange', (low, high)) if 'qlow' in params or 'qhigh' in params: qlow = params.pop('qlow', 'sqrt(11)') qhigh = params.pop('qhigh', 64) params.setdefault('searchQRange', (qlow, qhigh)) # write params for key in ['channelName', 'frameType']: if key not in params: raise KeyError("No %r defined for %s" % (key, str(channel))) for key, value in params.items(): key = '%s:' % str(key) if isinstance(value, tuple): value = '[%s]' % ' '.join(map(str, value)) elif isinstance(value, float) and value.is_integer(): value = int(value) elif isinstance(value, str): value = repr(value) print(' {0: <30} {1}'.format(key, value), file=file) print('}', file=file)
def print_omega_channel(channel, file=sys.stdout)
Print a `Channel` in Omega-pipeline scan format
2.862173
2.832866
1.010345
if hasattr(channel, 'ndsname'): # gwpy.detector.Channel return channel.ndsname if hasattr(channel, 'channel_type'): # nds2.channel return '%s,%s' % (channel.name, channel.channel_type_to_string(channel.channel_type)) return str(channel)
def _get_nds2_name(channel)
Returns the NDS2-formatted name for a channel Understands how to format NDS name strings from `gwpy.detector.Channel` and `nds2.channel` objects
4.648852
3.117528
1.491198
hosts = [] for host in os.getenv(env).split(','): try: host, port = host.rsplit(':', 1) except ValueError: port = None else: port = int(port) if (host, port) not in hosts: hosts.append((host, port)) return hosts
def parse_nds_env(env='NDSSERVER')
Parse the NDSSERVER environment variable into a list of hosts Parameters ---------- env : `str`, optional environment variable name to use for server order, default ``'NDSSERVER'``. The contents of this variable should be a comma-separated list of `host:port` strings, e.g. ``'nds1.server.com:80,nds2.server.com:80'`` Returns ------- hostiter : `list` of `tuple` a list of (unique) ``(str, int)`` tuples for each host:port pair
2.345187
2.654043
0.883628
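A worked example of the parsing (hostnames illustrative):

    >>> import os
    >>> os.environ['NDSSERVER'] = (
    ...     'nds.ligo.caltech.edu:31200,nds.ligo-wa.caltech.edu:31200')
    >>> parse_nds_env()
    [('nds.ligo.caltech.edu', 31200), ('nds.ligo-wa.caltech.edu', 31200)]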
hosts = []
# if given environment variable exists, it will contain a
# comma-separated list of host:port strings giving the logical ordering
if env and os.getenv(env):
    hosts = parse_nds_env(env)
# if the requested data are older than the spinning-disk lookback,
# prefer the CIT archive server, otherwise prefer the server for this IFO
if to_gps('now') - to_gps(epoch) > lookback:
    ifolist = [None, ifo]
else:
    ifolist = [ifo, None]
for difo in ifolist:
    try:
        host, port = DEFAULT_HOSTS[difo]
    except KeyError:
        # unknown default NDS2 host for detector, if we don't have
        # hosts already defined (either by NDSSERVER or similar)
        # we should warn the user
        if not hosts:
            warnings.warn('No default host found for ifo %r' % ifo)
    else:
        if (host, port) not in hosts:
            hosts.append((host, port))
return list(hosts)
def host_resolution_order(ifo, env='NDSSERVER', epoch='now', lookback=14*86400)
Generate a logical ordering of NDS (host, port) tuples for this IFO Parameters ---------- ifo : `str` prefix for IFO of interest env : `str`, optional environment variable name to use for server order, default ``'NDSSERVER'``. The contents of this variable should be a comma-separated list of `host:port` strings, e.g. ``'nds1.server.com:80,nds2.server.com:80'`` epoch : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS epoch of data requested lookback : `float` duration of spinning-disk cache. This value triggers defaulting to the CIT NDS2 server over those at the LIGO sites Returns ------- hro : `list` of `2-tuples <tuple>` ordered `list` of ``(host, port)`` tuples
8.088875
7.247584
1.116079
import nds2 # pylint: disable=no-member # set default port for NDS1 connections (required, I think) if port is None and NDS1_HOSTNAME.match(host): port = 8088 if port is None: return nds2.connection(host) return nds2.connection(host, port)
def connect(host, port=None)
Open an `nds2.connection` to a given host and port Parameters ---------- host : `str` name of server with which to connect port : `int`, optional connection port Returns ------- connection : `nds2.connection` a new open connection to the given NDS host
6.256545
6.198998
1.009283
try: return connect(host, port) except RuntimeError as exc: if 'Request SASL authentication' not in str(exc): raise warnings.warn('Error authenticating against {0}:{1}'.format(host, port), NDSWarning) kinit() return connect(host, port)
def auth_connect(host, port=None)
Open an `nds2.connection` handling simple authentication errors This method will catch exceptions related to kerberos authentication, and execute a kinit() for the user before attempting to connect again. Parameters ---------- host : `str` name of server with which to connect port : `int`, optional connection port Returns ------- connection : `nds2.connection` a new open connection to the given NDS host
6.196336
5.185379
1.194963
@wraps(func) def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring if kwargs.get('connection', None) is None: try: host = kwargs.pop('host') except KeyError: raise TypeError("one of `connection` or `host` is required " "to query NDS2 server") kwargs['connection'] = auth_connect(host, kwargs.pop('port', None)) return func(*args, **kwargs) return wrapped_func
def open_connection(func)
Decorate a function to create a `nds2.connection` if required
3.400743
2.972252
1.144163
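A sketch of the decorator in use (the decorated function is hypothetical):

    >>> @open_connection
    ... def list_nds2_channels(glob='*', connection=None):
    ...     # `connection` is injected by the decorator when only
    ...     # `host` (and optionally `port`) are passed by the caller
    ...     return connection.find_channels(glob)
    >>> channels = list_nds2_channels(host='nds.ligo.caltech.edu')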
@wraps(func) def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring for kwd, enum_ in (('type', Nds2ChannelType), ('dtype', Nds2DataType)): if kwargs.get(kwd, None) is None: kwargs[kwd] = enum_.any() elif not isinstance(kwargs[kwd], int): kwargs[kwd] = enum_.find(kwargs[kwd]).value return func(*args, **kwargs) return wrapped_func
def parse_nds2_enums(func)
Decorate a function to translate a type string into an integer
3.103806
3.089409
1.00466
@wraps(func) def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring connection = kwargs.get('connection', None) epoch = connection.current_epoch() if connection else None try: return func(*args, **kwargs) finally: if epoch is not None: connection.set_epoch(epoch.gps_start, epoch.gps_stop) return wrapped_func
def reset_epoch(func)
Wrap a function to reset the epoch when finished This is useful for functions that wish to use `connection.set_epoch`.
2.863287
2.470685
1.158904
# pylint: disable=unused-argument,redefined-builtin

# set epoch
if not isinstance(epoch, tuple):
    epoch = (epoch or 'ALL',)
connection.set_epoch(*epoch)

# format sample_rate as tuple for find_channels call
if isinstance(sample_rate, (int, float)):
    sample_rate = (sample_rate, sample_rate)
elif sample_rate is None:
    sample_rate = tuple()

# query for channels
out = []
for name in _get_nds2_names(channels):
    out.extend(_find_channel(connection, name, type, dtype, sample_rate,
                             unique=unique))
return out
def find_channels(channels, connection=None, host=None, port=None, sample_rate=None, type=Nds2ChannelType.any(), dtype=Nds2DataType.any(), unique=False, epoch='ALL')
Query an NDS2 server for channel information

Parameters
----------
channels : `list` of `str`
    list of channel names to query, each can include bash-style globs

connection : `nds2.connection`, optional
    open NDS2 connection to use for query

host : `str`, optional
    name of NDS2 server to query, required if ``connection`` is
    not given

port : `int`, optional
    port number on host to use for NDS2 connection

sample_rate : `int`, `float`, `tuple`, optional
    a single number, representing a specific sample rate to match,
    or a tuple representing a ``(low, high)`` interval to match

type : `int`, optional
    the NDS2 channel type to match

dtype : `int`, optional
    the NDS2 data type to match

unique : `bool`, optional, default: `False`
    require one (and only one) match per channel

epoch : `str`, `tuple` of `int`, optional
    the NDS epoch to restrict to, either the name of a known epoch,
    or a 2-tuple of GPS ``[start, stop)`` times

Returns
-------
channels : `list` of `nds2.channel`
    list of NDS2 channel objects

See Also
--------
nds2.connection.find_channels
    for documentation on the underlying query method

Examples
--------
>>> from gwpy.io.nds2 import find_channels
>>> find_channels(['G1:DER_DATA_H'], host='nds.ligo.caltech.edu')
[<G1:DER_DATA_H (16384Hz, RDS, FLOAT64)>]
3.512577
4.673117
0.751656