code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# parse arguments
qsegs = _parse_query_segments(args, cls.query_dqsegdb)

# get server
url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)

# parse flag
out = cls(name=flag)
if out.ifo is None or out.tag is None:
    raise ValueError("Cannot parse ifo or tag (name) for flag %r" % flag)

# process query
for start, end in qsegs:
    # handle infinities
    if float(end) == +inf:
        end = to_gps('now').seconds

    # query
    try:
        data = query_segments(flag, int(start), int(end), host=url)
    except HTTPError as exc:
        if exc.code == 404:  # if not found, annotate flag name
            exc.msg += ' [{0}]'.format(flag)
        raise

    # read from json buffer
    new = cls.read(
        BytesIO(json.dumps(data).encode('utf-8')),
        format='json',
    )

    # restrict to query segments
    segl = SegmentList([Segment(start, end)])
    new.known &= segl
    new.active &= segl
    out += new

# replace metadata
out.description = new.description
out.isgood = new.isgood

return out
def query_dqsegdb(cls, flag, *args, **kwargs)
Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
6.799276
5.816278
1.169008
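A minimal usage sketch for this method (the flag name and GPS interval are illustrative, and network access to a DQSegDB server is assumed):

from gwpy.segments import DataQualityFlag

# query one day of (hypothetical) H1 analysis-ready segments
flag = DataQualityFlag.query_dqsegdb(
    'H1:DMT-ANALYSIS_READY:1', 1187000000, 1187086400)
print(flag.active)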
start = to_gps(start).gpsSeconds
end = to_gps(end).gpsSeconds
known = [(start, end)]
active = timeline.get_segments(flag, start, end, **kwargs)
return cls(flag.replace('_', ':', 1), known=known, active=active,
           label=flag)
def fetch_open_data(cls, flag, start, end, **kwargs)
Fetch Open Data timeline segments into a flag. Parameters ---------- flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)>
8.889614
8.91155
0.997539
if 'flag' in kwargs:  # pragma: no cover
    warnings.warn('\'flag\' keyword was renamed \'name\', this '
                  'warning will result in an error in the future')
    kwargs.setdefault('name', kwargs.pop('flag'))
coalesce = kwargs.pop('coalesce', False)

def combiner(flags):
    out = flags[0]
    for flag in flags[1:]:
        out.known += flag.known
        out.active += flag.active
    if coalesce:
        return out.coalesce()
    return out

return io_read_multi(combiner, cls, source, *args, **kwargs)
def read(cls, source, *args, **kwargs)
Read segments from file into a `DataQualityFlag`. Parameters ---------- filename : `str` path of file to read name : `str`, optional name of flag to read from file, otherwise read all segments. format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coltype : `type`, optional, default: `float` datatype to force for segment times, only valid for ``format='segwizard'``. strict : `bool`, optional, default: `True` require segment start and stop times match printed duration, only valid for ``format='segwizard'``. coalesce : `bool`, optional if `True` coalesce all segment lists before returning, otherwise return exactly as contained in file(s). nproc : `int`, optional, default: 1 number of CPUs to use for parallel reading of multiple files verbose : `bool`, optional, default: `False` print a progress bar showing read status Returns ------- dqflag : `DataQualityFlag` formatted `DataQualityFlag` containing the active and known segments read from file. Notes -----
5.465607
5.00454
1.09213
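A short usage sketch for this reader (the file path is hypothetical):

from gwpy.segments import DataQualityFlag

# read and coalesce segments from a segwizard-format file
flag = DataQualityFlag.read('H1-segments.txt', format='segwizard',
                            coalesce=True)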
name = '%s:%s' % (veto.ifo, veto.name)
try:
    name += ':%d' % int(veto.version)
except TypeError:
    pass
if veto.end_time == 0:
    veto.end_time = +inf
known = Segment(veto.start_time, veto.end_time)
pad = (veto.start_pad, veto.end_pad)
return cls(name=name, known=[known], category=veto.category,
           description=veto.comment, padding=pad)
def from_veto_def(cls, veto)
Define a `DataQualityFlag` from a `VetoDef` Parameters ---------- veto : :class:`~ligo.lw.lsctables.VetoDef` veto definition to convert from
4.294533
4.599916
0.933611
tmp = DataQualityDict()
tmp[self.name] = self
tmp.populate(source=source, segments=segments, pad=pad, **kwargs)
return tmp[self.name]
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, **kwargs)
Query the segment database for this flag's active segments. This method assumes all of the metadata for this flag has been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. This `DataQualityFlag` will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of segments during which to query, if not given, existing known segments for this flag will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with this flag, default: `True`. **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityFlag` a reference to this flag
5.223174
6.777109
0.770708
self.active = self.active.contract(x)
return self.active
def contract(self, x)
Contract each of the `active` `Segments` by ``x`` seconds. This method adds ``x`` to each segment's lower bound, and subtracts ``x`` from the upper bound. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to contract each `Segment`.
7.383153
8.719253
0.846764
self.active = self.active.protract(x)
return self.active
def protract(self, x)
Protract each of the `active` `Segments` by ``x`` seconds. This method subtracts ``x`` from each segment's lower bound, and adds ``x`` to the upper bound, while maintaining that each `Segment` stays within the `known` bounds. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to protract each `Segment`.
6.2531
9.047935
0.691108
if not args:
    start, end = self.padding
else:
    start, end = args
if kwargs.pop('inplace', False):
    new = self
else:
    new = self.copy()
if kwargs:
    raise TypeError("unexpected keyword argument %r"
                    % list(kwargs.keys())[0])
new.known = [(s[0]+start, s[1]+end) for s in self.known]
new.active = [(s[0]+start, s[1]+end) for s in self.active]
return new
def pad(self, *args, **kwargs)
Apply a padding to each segment in this `DataQualityFlag` This method either takes no arguments, in which case the value of the :attr:`~DataQualityFlag.padding` attribute will be used, or two values representing the padding for the start and end of each segment. For both the `start` and `end` paddings, a positive value means pad forward in time, so that a positive `start` pad or negative `end` padding will contract a segment at one or both ends, and vice-versa. This method will apply the same padding to both the `~DataQualityFlag.known` and `~DataQualityFlag.active` lists, but will not :meth:`~DataQualityFlag.coalesce` the result. Parameters ---------- start : `float` padding to apply to the start of each segment end : `float` padding to apply to the end of each segment inplace : `bool`, optional, default: `False` modify this object in-place, default is `False`, i.e. return a copy of the original object with padded segments Returns ------- paddedflag : `DataQualityFlag` a view of the modified flag
3.051567
2.552804
1.195379
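A small worked example of the sign convention described above (the flag name and segments are hypothetical):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST:1', known=[(0, 100)], active=[(10, 20)])
# positive start pad and negative end pad both contract each segment
padded = flag.pad(2, -2)
print(padded.active)  # [12 ... 18)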
def _round(seg):
    if contract:  # round inwards
        a = type(seg[0])(ceil(seg[0]))
        b = type(seg[1])(floor(seg[1]))
    else:  # round outwards
        a = type(seg[0])(floor(seg[0]))
        b = type(seg[1])(ceil(seg[1]))
    if a >= b:  # if segment is too short, return 'null' segment
        return type(seg)(0, 0)  # will get coalesced away
    return type(seg)(a, b)

new = self.copy()
new.active = type(new.active)(map(_round, new.active))
new.known = type(new.known)(map(_round, new.known))
return new.coalesce()
def round(self, contract=False)
Round this flag to integer segments. Parameters ---------- contract : `bool`, optional if `False` (default) expand each segment to the containing integer boundaries, otherwise contract each segment to the contained boundaries Returns ------- roundedflag : `DataQualityFlag` A copy of the original flag with the `active` and `known` segments padded out to integer boundaries.
3.516087
2.99034
1.175815
self.known = self.known.coalesce()
self.active = self.active.coalesce()
self.active = (self.known & self.active).coalesce()
return self
def coalesce(self)
Coalesce the segments for this flag. This method does two things: - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and `~DataQualityFlag.active` segment lists - forces the `active` segments to be a proper subset of the `known` segments .. note:: this operation is performed in-place. Returns ------- self a view of this flag, not a copy.
4.553412
3.186819
1.428827
from matplotlib import rcParams
from ..plot import Plot
if self.label:
    kwargs.setdefault('label', self.label)
elif rcParams['text.usetex']:
    kwargs.setdefault('label', self.texname)
else:
    kwargs.setdefault('label', self.name)
kwargs.update(figsize=figsize, xscale=xscale)
return Plot(self, projection='segments', **kwargs)
def plot(self, figsize=(12, 4), xscale='auto-gps', **kwargs)
Plot this flag on a segments projection. Parameters ---------- **kwargs all keyword arguments are passed to the :class:`~gwpy.plot.Plot` constructor. Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_segmentlist for documentation of keyword arguments used in rendering the data
3.931042
3.499458
1.123329
if name is None:
    self.ifo = None
    self.tag = None
    self.version = None
elif re_IFO_TAG_VERSION.match(name):
    match = re_IFO_TAG_VERSION.match(name).groupdict()
    self.ifo = match['ifo']
    self.tag = match['tag']
    self.version = int(match['version'])
elif re_IFO_TAG.match(name):
    match = re_IFO_TAG.match(name).groupdict()
    self.ifo = match['ifo']
    self.tag = match['tag']
    self.version = None
elif re_TAG_VERSION.match(name):
    match = re_TAG_VERSION.match(name).groupdict()
    self.ifo = None
    self.tag = match['tag']
    self.version = int(match['version'])
else:
    raise ValueError("No flag name structure detected in '%s', flags "
                     "should be named as '{ifo}:{tag}:{version}'. "
                     "For arbitrary strings, use the "
                     "`DataQualityFlag.label` attribute" % name)
return self.ifo, self.tag, self.version
def _parse_name(self, name)
Internal method to parse a `string` name into constituent `ifo`, `name` and `version` components. Parameters ---------- name : `str`, `None` the full name of a `DataQualityFlag` to parse, e.g. ``'H1:DMT-SCIENCE:1'``, or `None` to set all components to `None` Returns ------- (ifo, name, version) A tuple of component string parts Raises ------ `ValueError` If the input ``name`` cannot be parsed into {ifo}:{tag}:{version} format.
2.52389
2.190074
1.152423
warnings.warn("query_segdb is deprecated and will be removed in a " "future release", DeprecationWarning) # parse segments qsegs = _parse_query_segments(args, cls.query_segdb) url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER) if kwargs.pop('on_error', None) is not None: warnings.warn("DataQualityDict.query_segdb doesn't accept the " "on_error keyword argument") if kwargs.keys(): raise TypeError("DataQualityDict.query_segdb has no keyword " "argument '%s'" % list(kwargs.keys()[0])) # process query from glue.segmentdb import (segmentdb_utils as segdb_utils, query_engine as segdb_engine) connection = segdb_utils.setup_database(url) engine = segdb_engine.LdbdQueryEngine(connection) segdefs = [] for flag in flags: dqflag = DataQualityFlag(name=flag) ifo = dqflag.ifo name = dqflag.tag if dqflag.version is None: vers = '*' else: vers = dqflag.version for gpsstart, gpsend in qsegs: if float(gpsend) == +inf: gpsend = to_gps('now').seconds gpsstart = float(gpsstart) if not gpsstart.is_integer(): raise ValueError("Segment database queries can only" "operate on integer GPS times") gpsend = float(gpsend) if not gpsend.is_integer(): raise ValueError("Segment database queries can only" "operate on integer GPS times") segdefs += segdb_utils.expand_version_number( engine, (ifo, name, vers, gpsstart, gpsend, 0, 0)) segs = segdb_utils.query_segments(engine, 'segment', segdefs) segsum = segdb_utils.query_segments(engine, 'segment_summary', segdefs) # build output out = cls() for definition, segments, summary in zip(segdefs, segs, segsum): # parse flag name flag = ':'.join(map(str, definition[:3])) name = flag.rsplit(':', 1)[0] # if versionless if flag.endswith('*'): flag = name key = name # if asked for versionless, but returned a version elif flag not in flags and name in flags: key = name # other else: key = flag # define flag if key not in out: out[key] = DataQualityFlag(name=flag) # add segments out[key].known.extend(summary) out[key].active.extend(segments) return out
def query_segdb(cls, flags, *args, **kwargs)
Query the initial LIGO segment database for a list of flags. Parameters ---------- flags : `iterable` A list of flag names for which to query. *args Either two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments. url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flagdict : `DataQualityDict` An ordered `DataQualityDict` of (name, `DataQualityFlag`) pairs.
4.448066
4.057673
1.096211
# check on_error flag
on_error = kwargs.pop('on_error', 'raise').lower()
if on_error not in ['raise', 'warn', 'ignore']:
    raise ValueError("on_error must be one of 'raise', 'warn', "
                     "or 'ignore'")

# parse segments
qsegs = _parse_query_segments(args, cls.query_dqsegdb)

# set up threading
inq = Queue()
outq = Queue()
for i in range(len(flags)):
    t = _QueryDQSegDBThread(inq, outq, qsegs, **kwargs)
    t.setDaemon(True)
    t.start()
for i, flag in enumerate(flags):
    inq.put((i, flag))

# capture output
inq.join()
outq.join()
new = cls()
results = list(zip(*sorted([outq.get() for i in range(len(flags))],
                           key=lambda x: x[0])))[1]
for result, flag in zip(results, flags):
    if isinstance(result, Exception):
        result.args = ('%s [%s]' % (str(result), str(flag)),)
        if on_error == 'ignore':
            pass
        elif on_error == 'warn':
            warnings.warn(str(result))
        else:
            raise result
    else:
        new[flag] = result
return new
def query_dqsegdb(cls, flags, *args, **kwargs)
Query the advanced LIGO DQSegDB for a list of flags. Parameters ---------- flags : `iterable` A list of flag names for which to query. *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments. on_error : `str` how to handle an error querying for one flag, one of - `'raise'` (default): raise the Exception - `'warn'`: print a warning - `'ignore'`: move onto the next flag as if nothing happened url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flagdict : `DataQualityDict` An ordered `DataQualityDict` of (name, `DataQualityFlag`) pairs.
2.771688
2.608878
1.062406
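A hedged usage sketch for the multi-flag query (flag names, times, and server availability are assumptions):

from gwpy.segments import DataQualityDict

flags = DataQualityDict.query_dqsegdb(
    ['H1:DMT-ANALYSIS_READY:1', 'L1:DMT-ANALYSIS_READY:1'],
    1187000000, 1187086400, on_error='warn')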
on_missing = kwargs.pop('on_missing', 'error')
coalesce = kwargs.pop('coalesce', False)
if 'flags' in kwargs:  # pragma: no cover
    warnings.warn('\'flags\' keyword was renamed \'names\', this '
                  'warning will result in an error in the future')
    names = kwargs.pop('flags')

def combiner(inputs):
    out = cls()

    # check all names are contained
    required = set(names or [])
    found = set(name for dqdict in inputs for name in dqdict)
    for name in required - found:  # validate all names are found once
        msg = '{!r} not found in any input file'.format(name)
        if on_missing == 'ignore':
            continue
        if on_missing == 'warn':
            warnings.warn(msg)
        else:
            raise ValueError(msg)

    # combine flags
    for dqdict in inputs:
        for flag in dqdict:
            try:  # repeated occurrence
                out[flag].known.extend(dqdict[flag].known)
                out[flag].active.extend(dqdict[flag].active)
            except KeyError:  # first occurrence
                out[flag] = dqdict[flag]

    if coalesce:
        return out.coalesce()
    return out

return io_read_multi(combiner, cls, source, names=names, format=format,
                     on_missing='ignore', **kwargs)
def read(cls, source, names=None, format=None, **kwargs)
Read segments from file into a `DataQualityDict` Parameters ---------- source : `str` path of file to read format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. names : `list`, optional, default: read all names found list of names to read, by default all names are read separately. coalesce : `bool`, optional if `True` coalesce all segment lists before returning, otherwise return exactly as contained in file(s). nproc : `int`, optional, default: 1 number of CPUs to use for parallel reading of multiple files verbose : `bool`, optional, default: `False` print a progress bar showing read status Returns ------- flagdict : `DataQualityDict` a new `DataQualityDict` of `DataQualityFlag` entries with ``active`` and ``known`` segments seeded from the XML tables in the given file. Notes -----
3.629748
3.380384
1.073768
if format != 'ligolw':
    raise NotImplementedError("Reading veto definer from non-ligolw "
                              "format file is not currently "
                              "supported")

# read veto definer file
with get_readable_fileobj(fp, show_progress=False) as fobj:
    from ..io.ligolw import read_table as read_ligolw_table
    veto_def_table = read_ligolw_table(fobj, 'veto_definer')

if start is not None:
    start = to_gps(start)
if end is not None:
    end = to_gps(end)

# parse flag definitions
out = cls()
for row in veto_def_table:
    if ifo and row.ifo != ifo:
        continue
    if start and 0 < row.end_time <= start:
        continue
    elif start:
        row.start_time = max(row.start_time, start)
    if end and row.start_time >= end:
        continue
    elif end and not row.end_time:
        row.end_time = end
    elif end:
        row.end_time = min(row.end_time, end)
    flag = DataQualityFlag.from_veto_def(row)
    if flag.name in out:
        out[flag.name].known.extend(flag.known)
        out[flag.name].known.coalesce()
    else:
        out[flag.name] = flag
return out
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None, format='ligolw')
Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable. Parameters ---------- fp : `str` path of veto definer file to read start : `~gwpy.time.LIGOTimeGPS`, `int`, optional GPS start time at which to restrict returned flags end : `~gwpy.time.LIGOTimeGPS`, `int`, optional GPS end time at which to restrict returned flags ifo : `str`, optional interferometer prefix whose flags you want to read format : `str`, optional format of file to read, currently only 'ligolw' is supported Returns ------- flags : `DataQualityDict` a `DataQualityDict` of flags parsed from the `veto_def_table` of the input file. Notes ----- This method does not automatically `~DataQualityDict.populate` the `active` segment list of any flags, a separate call should be made for that as follows >>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml') >>> flags.populate()
2.920703
2.835326
1.030112
out = cls()
id_ = dict()  # need to record relative IDs from LIGO_LW

# read segment definers and generate DataQualityFlag object
for row in segmentdeftable:
    ifos = sorted(row.instruments)
    ifo = ''.join(ifos) if ifos else None
    tag = row.name
    version = row.version
    name = ':'.join([str(k) for k in (ifo, tag, version)
                     if k is not None])
    if names is None or name in names:
        out[name] = DataQualityFlag(name)
        thisid = int(row.segment_def_id)
        try:
            id_[name].append(thisid)
        except (AttributeError, KeyError):
            id_[name] = [thisid]

# verify all requested flags were found
for flag in names or []:
    if flag not in out and on_missing != 'ignore':
        msg = ("no segment definition found for flag={0!r} in "
               "file".format(flag))
        if on_missing == 'warn':
            warnings.warn(msg)
        else:
            raise ValueError(msg)

# parse a table into the target DataQualityDict
def _parse_segments(table, listattr):
    for row in table:
        for flag in out:
            # match row ID to list of IDs found for this flag
            if int(row.segment_def_id) in id_[flag]:
                getattr(out[flag], listattr).append(
                    Segment(*map(gpstype, row.segment)),
                )
                break

# read segment summary table as 'known'
_parse_segments(segmentsumtable, "known")

# read segment table as 'active'
_parse_segments(segmenttable, "active")

return out
def from_ligolw_tables(cls, segmentdeftable, segmentsumtable, segmenttable, names=None, gpstype=LIGOTimeGPS, on_missing='error')
Build a `DataQualityDict` from a set of LIGO_LW segment tables Parameters ---------- segmentdeftable : :class:`~ligo.lw.lsctables.SegmentDefTable` the ``segment_definer`` table to read segmentsumtable : :class:`~ligo.lw.lsctables.SegmentSumTable` the ``segment_summary`` table to read segmenttable : :class:`~ligo.lw.lsctables.SegmentTable` the ``segment`` table to read names : `list` of `str`, optional a list of flag names to read, defaults to returning all gpstype : `type`, `callable`, optional class to use for GPS times in returned objects, can be a function to convert GPS time to something else, default is `~gwpy.time.LIGOTimeGPS` on_missing : `str`, optional action to take when one or more ``names`` are not found in the ``segment_definer`` table, one of - ``'ignore'`` : do nothing - ``'warn'`` : print a warning - ``'error'`` : raise a `ValueError` Returns ------- dqdict : `DataQualityDict` a dict of `DataQualityFlag` objects populated from the LIGO_LW tables
4.721519
4.303239
1.097201
if ilwdchar_compat is None:
    warnings.warn("ilwdchar_compat currently defaults to `True`, "
                  "but this will change to `False` in the future, to "
                  "maintain compatibility in future releases, "
                  "manually specify `ilwdchar_compat=True`",
                  PendingDeprecationWarning)
    ilwdchar_compat = True

if ilwdchar_compat:
    from glue.ligolw import lsctables
else:
    from ligo.lw import lsctables

from ..io.ligolw import to_table_type as to_ligolw_table_type

SegmentDefTable = lsctables.SegmentDefTable
SegmentSumTable = lsctables.SegmentSumTable
SegmentTable = lsctables.SegmentTable
segdeftab = lsctables.New(SegmentDefTable)
segsumtab = lsctables.New(SegmentSumTable)
segtab = lsctables.New(SegmentTable)

def _write_attrs(table, row):
    for key, val in attrs.items():
        setattr(row, key, to_ligolw_table_type(val, table, key))

# write flags to tables
for flag in self.values():
    # segment definer
    segdef = segdeftab.RowType()
    for col in segdeftab.columnnames:  # default all columns to None
        setattr(segdef, col, None)
    segdef.instruments = {flag.ifo}
    segdef.name = flag.tag
    segdef.version = flag.version
    segdef.comment = flag.description
    segdef.insertion_time = to_gps(datetime.datetime.now()).gpsSeconds
    segdef.segment_def_id = SegmentDefTable.get_next_id()
    _write_attrs(segdeftab, segdef)
    segdeftab.append(segdef)

    # write segment summary (known segments)
    for vseg in flag.known:
        segsum = segsumtab.RowType()
        for col in segsumtab.columnnames:  # default columns to None
            setattr(segsum, col, None)
        segsum.segment_def_id = segdef.segment_def_id
        segsum.segment = map(LIGOTimeGPS, vseg)
        segsum.comment = None
        segsum.segment_sum_id = SegmentSumTable.get_next_id()
        _write_attrs(segsumtab, segsum)
        segsumtab.append(segsum)

    # write segment table (active segments)
    for aseg in flag.active:
        seg = segtab.RowType()
        for col in segtab.columnnames:  # default all columns to None
            setattr(seg, col, None)
        seg.segment_def_id = segdef.segment_def_id
        seg.segment = map(LIGOTimeGPS, aseg)
        seg.segment_id = SegmentTable.get_next_id()
        _write_attrs(segtab, seg)
        segtab.append(seg)

return segdeftab, segsumtab, segtab
def to_ligolw_tables(self, ilwdchar_compat=None, **attrs)
Convert this `DataQualityDict` into a trio of LIGO_LW segment tables Parameters ---------- ilwdchar_compat : `bool`, optional whether to write in the old format, compatible with ILWD characters (`True`), or to use the new format (`False`); the current default is `True` to maintain backwards compatibility, but this will change for gwpy-1.0.0. **attrs other attributes to add to all rows in all tables (e.g. ``'process_id'``) Returns ------- segmentdeftable : :class:`~ligo.lw.lsctables.SegmentDefTable` the ``segment_definer`` table segmentsumtable : :class:`~ligo.lw.lsctables.SegmentSumTable` the ``segment_summary`` table segmenttable : :class:`~ligo.lw.lsctables.SegmentTable` the ``segment`` table
2.85946
2.616174
1.092993
# check on_error flag
if on_error not in ['raise', 'warn', 'ignore']:
    raise ValueError("on_error must be one of 'raise', 'warn', "
                     "or 'ignore'")

# format source
source = urlparse(source)

# perform query for all segments
if source.netloc and segments is not None:
    segments = SegmentList(map(Segment, segments))
    tmp = type(self).query(self.keys(), segments, url=source.geturl(),
                           on_error=on_error, **kwargs)
elif not source.netloc:
    tmp = type(self).read(source.geturl(), **kwargs)

# apply padding and wrap to given known segments
for key in self:
    if segments is None and source.netloc:
        try:
            tmp = {key: self[key].query(
                self[key].name, self[key].known, **kwargs)}
        except URLError as exc:
            if on_error == 'ignore':
                pass
            elif on_error == 'warn':
                warnings.warn('Error querying for %s: %s' % (key, exc))
            else:
                raise
            continue
    self[key].known &= tmp[key].known
    self[key].active = tmp[key].active
    if pad:
        self[key] = self[key].pad(inplace=True)
    if segments is not None:
        self[key].known &= segments
        self[key].active &= segments
return self
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, on_error='raise', **kwargs)
Query the segment database for each flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. Entries in this dict will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of known segments during which to query, if not given, existing known segments for flags will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with each flag, default: `True`. on_error : `str` how to handle an error querying for one flag, one of - `'raise'` (default): raise the Exception - `'warn'`: print a warning - `'ignore'`: move onto the next flag as if nothing happened **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityDict` a reference to the modified DataQualityDict
3.554698
3.27862
1.084206
if deep:
    return deepcopy(self)
return super(DataQualityDict, self).copy()
def copy(self, deep=False)
Build a copy of this dictionary. Parameters ---------- deep : `bool`, optional, default: `False` perform a deep copy of the original dictionary with a fresh memory address Returns ------- flag2 : `DataQualityDict` a copy of the original dictionary
9.983108
8.403572
1.18796
usegs = reduce(operator.or_, self.values())
usegs.name = ' | '.join(self.keys())
return usegs
def union(self)
Return the union of all flags in this dict Returns ------- union : `DataQualityFlag` a new `DataQualityFlag` whose active and known segments are the union of those of the values of this dict
9.02118
8.972247
1.005454
isegs = reduce(operator.and_, self.values())
isegs.name = ' & '.join(self.keys())
return isegs
def intersection(self)
Return the intersection of all flags in this dict Returns ------- intersection : `DataQualityFlag` a new `DataQualityFlag` whose active and known segments are the intersection of those of the values of this dict
8.556407
7.265306
1.177708
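For example, given a populated dict (the variable name is illustrative), the two combinators read as:

coincident = dqdict.intersection()  # active when every flag is active
combined = dqdict.union()           # active when any flag is active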
# make plot
from ..plot import Plot
plot = Plot(self, projection='segments', **kwargs)

# update labels
artists = [x for ax in plot.axes for x in ax.collections]
for key, artist in zip(self, artists):
    if label.lower() == 'name':
        lab = self[key].name
    elif label.lower() == 'key':
        lab = key
    else:  # fixed label for all flags
        lab = label
    artist.set_label(lab)

return plot
def plot(self, label='key', **kwargs)
Plot this flag on a segments projection. Parameters ---------- label : `str`, optional Labelling system to use, or fixed label for all flags, special values include - ``'key'``: use the key of the `DataQualityDict`, - ``'name'``: use the :attr:`~DataQualityFlag.name` of the flag If anything else, that fixed label will be used for all lines. **kwargs all keyword arguments are passed to the :class:`~gwpy.plot.Plot` constructor. Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_segmentlist for documentation of keyword arguments used in rendering the data
4.477857
4.124324
1.085719
# if user didn't specify to use tex or not, guess based on
# the `GWPY_USETEX` environment variable, or whether tex is
# installed at all.
if usetex is None:
    usetex = bool_env(
        'GWPY_USETEX',
        default=rcParams['text.usetex'] or tex.has_tex())

# build RcParams from matplotlib.rcParams with GWpy extras
rcp = GWPY_RCPARAMS.copy()
if usetex:
    rcp.update(GWPY_TEX_RCPARAMS)
return rcp
def rc_params(usetex=None)
Returns a new `matplotlib.RcParams` with updated GWpy parameters The updated parameters are globally stored as `gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as `gwpy.plot.rc.GWPY_TEX_RCPARAMS`. .. note:: This function doesn't apply the new `RcParams` in any way, just creates something that can be used to set `matplotlib.rcParams`. Parameters ---------- usetex : `bool`, `None` value to set for `text.usetex`; if `None` determine automatically using the ``GWPY_USETEX`` environment variable, and whether `tex` is available on the system. If `True` is given (or determined) a number of other parameters are updated to improve TeX formatting. Examples -------- >>> import matplotlib >>> from gwpy.plot.rc import rc_params as gwpy_rc_params >>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
5.105885
4.44252
1.149322
width, height = figsize
try:
    left, right = SUBPLOT_WIDTH[width]
except KeyError:
    left = right = None
try:
    bottom, top = SUBPLOT_HEIGHT[height]
except KeyError:
    bottom = top = None
return SubplotParams(left=left, bottom=bottom, right=right, top=top)
def get_subplot_params(figsize)
Return sensible default `SubplotParams` for a figure of the given size Parameters ---------- figsize : `tuple` of `float` the ``(width, height)`` figure size (inches) Returns ------- params : `~matplotlib.figure.SubplotParams` formatted set of subplot parameters
2.915811
3.486634
0.836283
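A brief sketch of how the returned parameters might be applied; the import location of get_subplot_params is an assumption:

from matplotlib.figure import Figure
from gwpy.plot.rc import get_subplot_params  # assumed module path

params = get_subplot_params((12, 6))
fig = Figure(figsize=(12, 6), subplotpars=params)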
usetex = rcParams['text.usetex']
if isinstance(input_, units.UnitBase):
    return input_.to_string('latex_inline')
if isinstance(input_, (float, int)) and usetex:
    return tex.float_to_latex(input_)
if usetex:
    return tex.label_to_latex(input_)
return str(input_)
def to_string(input_)
Format an input for representation as text This function is just a convenience that handles default LaTeX formatting
3.55622
3.401293
1.04555
if not axis.isDefault_label:
    return
label = axis.set_label_text(unit.to_string('latex_inline_dimensional'))
axis.isDefault_label = True
return label.get_text()
def default_unit_label(axis, unit)
Set default label for an axis from a `~astropy.units.Unit` If the axis already has a label, this function does nothing. Parameters ---------- axis : `~matplotlib.axis.Axis` the axis to manipulate unit : `~astropy.units.Unit` the unit to use for the label Returns ------- text : `str`, `None` the text for the new label, if set, otherwise `None`
7.319254
6.745837
1.085003
app.config.setdefault('LDAP_HOST', 'localhost')
app.config.setdefault('LDAP_PORT', 389)
app.config.setdefault('LDAP_SCHEMA', 'ldap')
app.config.setdefault('LDAP_USERNAME', None)
app.config.setdefault('LDAP_PASSWORD', None)
app.config.setdefault('LDAP_TIMEOUT', 10)
app.config.setdefault('LDAP_USE_SSL', False)
app.config.setdefault('LDAP_USE_TLS', False)
app.config.setdefault('LDAP_REQUIRE_CERT', False)
app.config.setdefault('LDAP_CERT_PATH', '/path/to/cert')
app.config.setdefault('LDAP_BASE_DN', None)
app.config.setdefault('LDAP_OBJECTS_DN', 'distinguishedName')
app.config.setdefault('LDAP_USER_FIELDS', [])
app.config.setdefault('LDAP_USER_OBJECT_FILTER',
                      '(&(objectclass=Person)(userPrincipalName=%s))')
app.config.setdefault('LDAP_USER_GROUPS_FIELD', 'memberOf')
app.config.setdefault('LDAP_GROUP_FIELDS', [])
app.config.setdefault('LDAP_GROUP_OBJECT_FILTER',
                      '(&(objectclass=Group)(userPrincipalName=%s))')
app.config.setdefault('LDAP_GROUP_MEMBERS_FIELD', 'member')
app.config.setdefault('LDAP_LOGIN_VIEW', 'login')
app.config.setdefault('LDAP_REALM_NAME', 'LDAP authentication')
app.config.setdefault('LDAP_OPENLDAP', False)
app.config.setdefault('LDAP_GROUP_MEMBER_FILTER', '*')
app.config.setdefault('LDAP_GROUP_MEMBER_FILTER_FIELD', '*')
app.config.setdefault('LDAP_CUSTOM_OPTIONS', None)

if app.config['LDAP_USE_SSL'] or app.config['LDAP_USE_TLS']:
    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

if app.config['LDAP_REQUIRE_CERT']:
    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
    ldap.set_option(ldap.OPT_X_TLS_CACERTFILE,
                    current_app.config['LDAP_CERT_PATH'])

for option in ['USERNAME', 'PASSWORD', 'BASE_DN']:
    if app.config['LDAP_{0}'.format(option)] is None:
        raise LDAPException('LDAP_{0} cannot be None!'.format(option))
def init_app(app)
Initialize the `app` for use with this :class:`~LDAP`. This is called automatically if `app` is passed to :meth:`~LDAP.__init__`. :param flask.Flask app: the application to configure for use with this :class:`~LDAP`
1.869191
1.865719
1.001861
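A minimal configuration sketch for this extension (host, credentials, and the flask_simpleldap package layout are assumptions):

from flask import Flask
from flask_simpleldap import LDAP  # assumed import

app = Flask(__name__)
app.config['LDAP_HOST'] = 'ldap.example.org'
app.config['LDAP_BASE_DN'] = 'dc=example,dc=org'
app.config['LDAP_USERNAME'] = 'cn=service,dc=example,dc=org'
app.config['LDAP_PASSWORD'] = 'secret'

ldap = LDAP(app)  # runs init_app(app) with the defaults above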
try:
    conn = ldap.initialize('{0}://{1}:{2}'.format(
        current_app.config['LDAP_SCHEMA'],
        current_app.config['LDAP_HOST'],
        current_app.config['LDAP_PORT']))
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT,
                    current_app.config['LDAP_TIMEOUT'])
    conn = self._set_custom_options(conn)
    conn.protocol_version = ldap.VERSION3
    if current_app.config['LDAP_USE_TLS']:
        conn.start_tls_s()
    return conn
except ldap.LDAPError as e:
    raise LDAPException(self.error(e.args))
def initialize(self)
Initialize a connection to the LDAP server. :return: LDAP connection object.
2.529555
2.309316
1.09537
# ``initialize`` is accessed as a property (no call parentheses),
# yielding a fresh connection object
conn = self.initialize
try:
    conn.simple_bind_s(
        current_app.config['LDAP_USERNAME'],
        current_app.config['LDAP_PASSWORD'])
    return conn
except ldap.LDAPError as e:
    raise LDAPException(self.error(e.args))
def bind(self)
Attempts to bind to the LDAP server using the credentials of the service account. :return: Bound LDAP connection object if successful or ``None`` if unsuccessful.
5.415913
4.525852
1.196662
user_dn = self.get_object_details(user=username, dn_only=True)

if user_dn is None:
    return
try:
    conn = self.initialize
    conn.simple_bind_s(user_dn.decode('utf-8'), password)
    return True
except ldap.LDAPError:
    return
def bind_user(self, username, password)
Attempts to bind a user to the LDAP server using the credentials supplied. .. note:: Many LDAP servers will grant anonymous access if ``password`` is the empty string, causing this method to return :obj:`True` no matter what username is given. If you want to use this method to validate a username and password, rather than actually connecting to the LDAP server as a particular user, make sure ``password`` is not empty. :param str username: The username to attempt to bind with. :param str password: The password of the username we're attempting to bind with. :return: Returns ``True`` if successful or ``None`` if the credentials are invalid.
4.646116
4.321557
1.075102
query = None
fields = None
if user is not None:
    if not dn_only:
        fields = current_app.config['LDAP_USER_FIELDS']
    query = ldap_filter.filter_format(
        current_app.config['LDAP_USER_OBJECT_FILTER'], (user,))
elif group is not None:
    if not dn_only:
        fields = current_app.config['LDAP_GROUP_FIELDS']
    query = ldap_filter.filter_format(
        current_app.config['LDAP_GROUP_OBJECT_FILTER'], (group,))
conn = self.bind
try:
    records = conn.search_s(current_app.config['LDAP_BASE_DN'],
                            ldap.SCOPE_SUBTREE, query, fields)
    conn.unbind_s()
    result = {}
    if records:
        if dn_only:
            if current_app.config['LDAP_OPENLDAP']:
                if records:
                    return records[0][0]
            else:
                if current_app.config['LDAP_OBJECTS_DN'] \
                        in records[0][1]:
                    dn = records[0][1][
                        current_app.config['LDAP_OBJECTS_DN']]
                    return dn[0]
        for k, v in list(records[0][1].items()):
            result[k] = v
    return result
except ldap.LDAPError as e:
    raise LDAPException(self.error(e.args))
def get_object_details(self, user=None, group=None, dn_only=False)
Returns a ``dict`` with the object's (user or group) details. :param str user: Username of the user object you want details for. :param str group: Name of the group object you want details for. :param bool dn_only: If we should only retrieve the object's distinguished name or not. Default: ``False``.
2.335106
2.363785
0.987867
conn = self.bind
try:
    if current_app.config['LDAP_OPENLDAP']:
        fields = [
            str(current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD'])]
        records = conn.search_s(
            current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
            ldap_filter.filter_format(
                current_app.config['LDAP_GROUP_MEMBER_FILTER'],
                (self.get_object_details(user, dn_only=True),)),
            fields)
    else:
        records = conn.search_s(
            current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
            ldap_filter.filter_format(
                current_app.config['LDAP_USER_OBJECT_FILTER'],
                (user,)),
            [current_app.config['LDAP_USER_GROUPS_FIELD']])
    conn.unbind_s()

    if records:
        if current_app.config['LDAP_OPENLDAP']:
            group_member_filter = \
                current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD']
            if sys.version_info[0] > 2:
                groups = [record[1][group_member_filter][0].decode(
                    'utf-8') for record in records]
            else:
                groups = [record[1][group_member_filter][0]
                          for record in records]
            return groups
        else:
            if current_app.config['LDAP_USER_GROUPS_FIELD'] in \
                    records[0][1]:
                groups = records[0][1][
                    current_app.config['LDAP_USER_GROUPS_FIELD']]
                result = [re.findall(b'(?:cn=|CN=)(.*?),', group)[0]
                          for group in groups]
                if sys.version_info[0] > 2:
                    result = [r.decode('utf-8') for r in result]
                return result
except ldap.LDAPError as e:
    raise LDAPException(self.error(e.args))
def get_user_groups(self, user)
Returns a ``list`` with the user's groups or ``None`` if unsuccessful. :param str user: User we want groups for.
2.299793
2.319757
0.991394
conn = self.bind
try:
    records = conn.search_s(
        current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
        ldap_filter.filter_format(
            current_app.config['LDAP_GROUP_OBJECT_FILTER'], (group,)),
        [current_app.config['LDAP_GROUP_MEMBERS_FIELD']])
    conn.unbind_s()

    if records:
        if current_app.config['LDAP_GROUP_MEMBERS_FIELD'] in \
                records[0][1]:
            members = records[0][1][
                current_app.config['LDAP_GROUP_MEMBERS_FIELD']]
            if sys.version_info[0] > 2:
                members = [m.decode('utf-8') for m in members]
            return members
except ldap.LDAPError as e:
    raise LDAPException(self.error(e.args))
def get_group_members(self, group)
Returns a ``list`` with the group's members or ``None`` if unsuccessful. :param str group: Group we want users for.
2.788278
2.945706
0.946557
@wraps(func)
def wrapped(*args, **kwargs):
    if g.user is None:
        return redirect(url_for(current_app.config['LDAP_LOGIN_VIEW'],
                                next=request.path))
    return func(*args, **kwargs)
return wrapped
def login_required(func)
When applied to a view function, any unauthenticated requests will be redirected to the view named in LDAP_LOGIN_VIEW. Authenticated requests do NOT require membership from a specific group. The login view is responsible for asking for credentials, checking them, and setting ``flask.g.user`` to the name of the authenticated user if the credentials are acceptable. :param func: The view function to decorate.
2.694555
2.50895
1.073977
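A usage sketch for the decorator (the route, view name, and import path are illustrative):

from flask import g
from flask_simpleldap import login_required  # assumed export

@app.route('/dashboard')
@login_required
def dashboard():
    return 'Hello, %s' % g.user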
def wrapper(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        if g.user is None:
            return redirect(
                url_for(current_app.config['LDAP_LOGIN_VIEW'],
                        next=request.path))
        match = [group for group in groups if group in g.ldap_groups]
        if not match:
            abort(401)
        return func(*args, **kwargs)
    return wrapped
return wrapper
def group_required(groups=None)
When applied to a view function, any unauthenticated requests will be redirected to the view named in LDAP_LOGIN_VIEW. Authenticated requests are only permitted if they belong to one of the listed groups. The login view is responsible for asking for credentials, checking them, and setting ``flask.g.user`` to the name of the authenticated user and ``flask.g.ldap_groups`` to the authenticated user's groups if the credentials are acceptable. :param list groups: List of groups that should be able to access the view function.
2.921716
2.643202
1.10537
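And similarly for group-restricted views (group name and import path are illustrative):

from flask_simpleldap import group_required  # assumed export

@app.route('/admin')
@group_required(groups=['web-admins'])
def admin_page():
    return 'admins only'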
def make_auth_required_response():
    response = make_response('Unauthorized', 401)
    response.www_authenticate.set_basic(
        current_app.config['LDAP_REALM_NAME'])
    return response

@wraps(func)
def wrapped(*args, **kwargs):
    if request.authorization is None:
        req_username = None
        req_password = None
    else:
        req_username = request.authorization.username
        req_password = request.authorization.password
    # Many LDAP servers will grant you anonymous access if you log in
    # with an empty password, even if you supply a non-anonymous user
    # ID, causing .bind_user() to return True. Therefore, only accept
    # non-empty passwords.
    if req_username in ['', None] or req_password in ['', None]:
        current_app.logger.debug('Got a request without auth data')
        return make_auth_required_response()
    if not self.bind_user(req_username, req_password):
        current_app.logger.debug('User {0!r} gave wrong '
                                 'password'.format(req_username))
        return make_auth_required_response()
    g.ldap_username = req_username
    g.ldap_password = req_password
    return func(*args, **kwargs)

return wrapped
def basic_auth_required(self, func)
When applied to a view function, any unauthenticated requests are asked to authenticate via HTTP's standard Basic Authentication system. Requests with credentials are checked with :meth:`.bind_user()`. The user's browser will typically show them the contents of LDAP_REALM_NAME as a prompt for which username and password to enter. If the request's credentials are accepted by the LDAP server, the username is stored in ``flask.g.ldap_username`` and the password in ``flask.g.ldap_password``. :param func: The view function to decorate.
3.305518
3.085252
1.071393
login_url = "https://www.duolingo.com/login"
data = {"login": self.username, "password": self.password}
request = self._make_req(login_url, data)
attempt = request.json()

if attempt.get('response') == 'OK':
    self.jwt = request.headers['jwt']
    return True

raise Exception("Login failed")
def _login(self)
Authenticate through ``https://www.duolingo.com/login``.
4.048581
3.35078
1.20825
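A hedged sketch of how this login is normally triggered (assuming the unofficial duolingo-api client, which logs in on construction):

import duolingo

# construction calls _login() and stores the session JWT
lingo = duolingo.Duolingo('myusername', password='secret')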
if before:
    url = "https://www.duolingo.com/stream/{}?before={}"
    url = url.format(self.user_data.id, before)
else:
    url = "https://www.duolingo.com/activity/{}"
    url = url.format(self.user_data.id)
request = self._make_req(url)
try:
    return request.json()
except Exception:
    raise Exception('Could not get activity stream')
def get_activity_stream(self, before=None)
Get user's activity stream from ``https://www.duolingo.com/stream/<user_id>?before=<date>`` if a before date is given, or else ``https://www.duolingo.com/activity/<user_id>`` :param before: Datetime in format '2015-07-06 05:42:24' :type before: str :rtype: dict
2.986134
2.282819
1.308091
if unit:
    url = 'https://www.duolingo.com/friendships/leaderboard_activity?unit={}&_={}'
else:
    raise Exception('Needs unit as argument (week or month)')

if before:
    url = url.format(unit, before)
else:
    raise Exception('Needs str in Datetime format "%Y.%m.%d %H:%M:%S"')

self.leader_data = self._make_req(url).json()
data = []
for result in iter(self.get_friends()):
    for value in iter(self.leader_data['ranking']):
        if result['id'] == int(value):
            temp = {'points': int(self.leader_data['ranking'][value]),
                    'unit': unit,
                    'id': result['id'],
                    'username': result['username']}
            data.append(temp)

return sorted(data, key=lambda user: user['points'], reverse=True)
def get_leaderboard(self, unit=None, before=None)
Get user's rank in the week in descending order, stream from ``https://www.duolingo.com/friendships/leaderboard_activity?unit=week&_=time`` :param before: Datetime in format '2015-07-06 05:42:24' :param unit: either 'week' or 'month' :type before: str :type unit: str :rtype: list
4.12453
3.090018
1.334792
url = 'https://www.duolingo.com/2017-06-30/users/{}/purchase-store-item'
url = url.format(self.user_data.id)
data = {'name': item_name, 'learningLanguage': abbr}
request = self._make_req(url, data)
if request.status_code == 400 and \
        request.json()['error'] == 'ALREADY_HAVE_STORE_ITEM':
    raise AlreadyHaveStoreItemException('Already equipped with '
                                        + item_name + '.')
if not request.ok:
    # any other error:
    raise Exception('Not possible to buy item.')
def buy_item(self, item_name, abbr)
A status code of 200 indicates that the item was purchased. Returns a text like: {"streak_freeze":"2017-01-10 02:39:59.594327"}
5.462501
5.672073
0.963052
lang = self.get_abbreviation_of(
    self.get_user_info()['learning_language_string'])
if lang is None:
    raise Exception('No learning language found')
try:
    self.buy_item('streak_freeze', lang)
    return True
except AlreadyHaveStoreItemException:
    return False
def buy_streak_freeze(self)
Figure out the user's current learning language and use it as the parameter for the shop.
7.6926
5.563057
1.382801
data = {"learning_language": lang} url = "https://www.duolingo.com/switch_language" request = self._make_req(url, data) try: parse = request.json()['tracking_properties'] if parse['learning_language'] == lang: self.user_data = Struct(**self._get_data()) except: raise Exception('Failed to switch language')
def _switch_language(self, lang)
Change the learned language with ``https://www.duolingo.com/switch_language``. :param lang: Wanted language abbreviation (example: ``'fr'``) :type lang: str
6.578439
5.291324
1.24325
# Key skills by first dependency. Dependency sets can be uniquely
# identified by one dependency in the set.
dependency_to_skill = MultiDict([(skill['dependencies_name'][0]
                                  if skill['dependencies_name']
                                  else '', skill)
                                 for skill in skills])

# Start with the first skill and trace the dependency graph through
# skill, setting the order it was learned in.
index = 0
previous_skill = ''
while True:
    for skill in dependency_to_skill.getlist(previous_skill):
        skill['dependency_order'] = index
    index += 1

    # Figure out the canonical dependency for the next set of skills.
    skill_names = set([skill['name'] for skill in
                       dependency_to_skill.getlist(previous_skill)])
    canonical_dependency = skill_names.intersection(
        set(dependency_to_skill.keys()))
    if canonical_dependency:
        previous_skill = canonical_dependency.pop()
    else:
        # Nothing depends on these skills, so we're done.
        break

return skills
def _compute_dependency_order(skills)
Add a field to each skill indicating the order it was learned based on the skill's dependencies. Multiple skills will have the same position if they have the same dependencies.
4.869784
4.627893
1.052268
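A tiny worked example of the ordering (hypothetical skill dicts carrying only the fields this helper reads):

skills = [
    {'name': 'Basics', 'dependencies_name': []},
    {'name': 'Phrases', 'dependencies_name': ['Basics']},
    {'name': 'Food', 'dependencies_name': ['Basics']},
]
_compute_dependency_order(skills)
# 'Basics' gets dependency_order 0; 'Phrases' and 'Food' share order 1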
data = []
for lang in self.user_data.languages:
    if lang['learning']:
        if abbreviations:
            data.append(lang['language'])
        else:
            data.append(lang['language_string'])
return data
def get_languages(self, abbreviations=False)
Get practiced languages. :param abbreviations: Get language as abbreviation or not :type abbreviations: bool :return: List of languages :rtype: list of str
4.598814
4.504119
1.021024
for language in self.user_data.languages:
    if language['language'] == abbr:
        return language['language_string']
return None
def get_language_from_abbr(self, abbr)
Get language full name from abbreviation.
5.146132
4.452384
1.155815
for language in self.user_data.languages:
    if language['language_string'] == name:
        return language['language']
return None
def get_abbreviation_of(self, name)
Get abbreviation of a language.
7.284907
5.596901
1.301597
for lang in self.user_data.languages:
    if language == lang['language_string']:
        return lang
return {}
def get_language_details(self, language)
Get user's status about a language.
7.776855
6.355097
1.223719
for certificate in self.user_data.certificates:
    certificate['datetime'] = certificate['datetime'].strip()
return self.user_data.certificates
def get_certificates(self)
Get user's certificates.
7.572959
5.82658
1.299726
if language_abbr:
    if not self._is_current_language(language_abbr):
        self._switch_language(language_abbr)
    return self.user_data.language_data[language_abbr]['calendar']
else:
    return self.user_data.calendar
def get_calendar(self, language_abbr=None)
Get user's last actions.
3.02585
2.962594
1.021352
if not self._is_current_language(lang):
    self._switch_language(lang)
fields = ['streak', 'language_string', 'level_progress',
          'num_skills_learned', 'level_percent', 'level_points',
          'points_rank', 'next_level', 'level_left', 'language',
          'points', 'fluency_score', 'level']
return self._make_dict(fields, self.user_data.language_data[lang])
def get_language_progress(self, lang)
Get information about user's progression in a language.
6.844955
6.494622
1.053942
for k, v in iter(self.user_data.language_data.items()):
    data = []
    for friend in v['points_ranking_data']:
        temp = {'username': friend['username'],
                'id': friend['id'],
                'points': friend['points_data']['total'],
                'languages': [i['language_string'] for i in
                              friend['points_data']['languages']]}
        data.append(temp)
    # NOTE: returns after the first language's ranking data
    return data
def get_friends(self)
Get user's friends.
5.203521
4.918918
1.057859
words = []
for topic in self.user_data.language_data[lang]['skills']:
    if topic['learned']:
        words += topic['words']
return set(words)
def get_known_words(self, lang)
Get a list of all words learned by user in a language.
7.144821
5.691968
1.255246
skills = [skill for skill in
          self.user_data.language_data[lang]['skills']]
self._compute_dependency_order(skills)
return [skill for skill in
        sorted(skills, key=lambda skill: skill['dependency_order'])
        if skill['learned']]
def get_learned_skills(self, lang)
Return the learned skill objects sorted by the order they were learned in.
6.098707
5.451929
1.118633
return [topic['title']
        for topic in self.user_data.language_data[lang]['skills']
        if topic['learned']]
def get_known_topics(self, lang)
Return the topics learned by a user in a language.
9.143404
6.438405
1.420135
return [topic['title']
        for topic in self.user_data.language_data[lang]['skills']
        if not topic['learned']]
def get_unknown_topics(self, lang)
Return the topics remaining to learn by a user in a language.
8.833691
5.894856
1.498542
return [topic['title']
        for topic in self.user_data.language_data[lang]['skills']
        if topic['learned'] and topic['strength'] == 1.0]
def get_golden_topics(self, lang)
Return the topics mastered ("golden") by a user in a language.
7.565141
6.711463
1.127197
return [topic['title']
        for topic in self.user_data.language_data[lang]['skills']
        if topic['learned'] and topic['strength'] < 1.0]
def get_reviewable_topics(self, lang)
Return the topics learned but not golden by a user in a language.
8.256575
6.095068
1.354632
if not source:
    source = self.user_data.ui_language
if not target:
    target = list(self.user_data.language_data.keys())[0]

word_parameter = json.dumps(words, separators=(',', ':'))
url = "https://d2.duolingo.com/api/1/dictionary/hints/{}/{}?tokens={}" \
    .format(target, source, word_parameter)

request = self.session.get(url)
try:
    return request.json()
except Exception:
    raise Exception('Could not get translations')
def get_translations(self, words, source=None, target=None)
Get words' translations from ``https://d2.duolingo.com/api/1/dictionary/hints/<source>/<target>?tokens=<words>`` :param words: A single word or a list :type words: str or list of str :param source: Source language as abbreviation :type source: str :param target: Destination language as abbreviation :type target: str :return: Dict with words as keys and translations as values
4.18684
2.959248
1.414832
if not self.password:
    raise Exception("You must provide a password for this function")
if language_abbr and not self._is_current_language(language_abbr):
    self._switch_language(language_abbr)

overview_url = "https://www.duolingo.com/vocabulary/overview"
overview_request = self._make_req(overview_url)
overview = overview_request.json()
return overview
def get_vocabulary(self, language_abbr=None)
Get overview of user's vocabulary in a language.
4.227789
3.800766
1.112352
def from_json(self, js, groups: Iterable[Group]):
    self.index = js["index"]
    self.groupIndex = js["groupIndex"]
    self.label = js["label"]
    self.functionalChannelType = FunctionalChannelType.from_str(
        js["functionalChannelType"], js["functionalChannelType"]
    )
    self.groups = []
    for id in js["groups"]:
        for g in groups:
            if g.id == id:
                self.groups.append(g)
                break
this function will load the functional channel object from a json object and the given groups Args: js(dict): the json object groups(Iterable[Group]): the groups for referencing
null
null
null
for _handler in self._on_update:
    _handler(*args, **kwargs)
def fire_update_event(self, *args, **kwargs)
Trigger the handlers tied to _on_update
5.997535
4.421429
1.35647
def download_configuration(self) -> str:
    return self._restCall(
        "home/getCurrentState",
        json.dumps(self._connection.clientCharacteristics)
    )
downloads the current configuration from the cloud Returns the downloaded configuration or an errorCode
null
null
null
def get_current_state(self, clearConfig: bool = False):
    json_state = self.download_configuration()
    if "errorCode" in json_state:
        LOGGER.error(
            "Could not get the current configuration. Error: %s",
            json_state["errorCode"],
        )
        return False

    if clearConfig:
        self.devices = []
        self.clients = []
        self.groups = []
        self.rules = []
        self.functionalHomes = []

    js_home = json_state["home"]
    self.from_json(js_home)

    self._get_devices(json_state)
    self._get_clients(json_state)
    self._get_groups(json_state)
    self._get_functionalHomes(js_home)
    self._load_functionalChannels()

    return True
downloads the current configuration and parses it into self Args: clearConfig(bool): if set to true, this function will remove all old objects from self.devices, self.client, ... to have a fresh config instead of reparsing them
null
null
null
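A hedged sketch of the usual call sequence around this method (token and access-point id are placeholders; assumes the homematicip-rest-api Home class):

from homematicip.home import Home

home = Home()
home.set_auth_token('YOUR_AUTH_TOKEN')
home.init('YOUR_ACCESS_POINT_ID')
if home.get_current_state():
    for device in home.devices:
        print(device.label)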
def get_functionalHome(self, functionalHomeType: type) -> FunctionalHome:
    for x in self.functionalHomes:
        if isinstance(x, functionalHomeType):
            return x
    return None
gets the specified functionalHome Args: functionalHomeType(type): the type of the functionalHome which should be returned Returns: the FunctionalHome or None if it couldn't be found
null
null
null
def search_device_by_id(self, deviceID) -> Device:
    for d in self.devices:
        if d.id == deviceID:
            return d
    return None
searches a device by given id Args: deviceID(str): the device to search for Returns the Device object or None if it couldn't find a device
null
null
null
def search_group_by_id(self, groupID) -> Group:
    for g in self.groups:
        if g.id == groupID:
            return g
    return None
searches a group by given id Args: groupID(str): groupID the group to search for Returns the group object or None if it couldn't find a group
null
null
null
def search_client_by_id(self, clientID) -> Client:
    for c in self.clients:
        if c.id == clientID:
            return c
    return None
searches a client by given id Args: clientID(str): the client to search for Returns the client object or None if it couldn't find a client
null
null
null
def search_rule_by_id(self, ruleID) -> Rule:
    for r in self.rules:
        if r.id == ruleID:
            return r
    return None
searches a rule by the given id

Args:
    ruleID(str): the id of the rule to search for

Returns:
    the rule object or None if no rule was found
null
null
null
def get_security_zones_activation(self) -> (bool, bool):
    internal_active = False
    external_active = False
    for g in self.groups:
        if isinstance(g, SecurityZoneGroup):
            if g.label == "EXTERNAL":
                external_active = g.active
            elif g.label == "INTERNAL":
                internal_active = g.active
    return internal_active, external_active
returns whether the security zones are armed or not

Returns:
    internal: True if the internal zone is armed
    external: True if the external zone is armed
null
null
null
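Reading the two zone states back might look like this, assuming a connected home object from this library:

internal, external = home.get_security_zones_activation()
if external and not internal:
    print("hull protection armed, someone may still be at home")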
def set_security_zones_activation(self, internal=True, external=True):
    data = {"zonesActivation": {"EXTERNAL": external, "INTERNAL": internal}}
    return self._restCall("home/security/setZonesActivation", json.dumps(data))
this function will set the alarm system to armed or disarmed

Args:
    internal(bool): activates/deactivates the internal zone
    external(bool): activates/deactivates the external zone

Examples:
    arming while being at home
    >>> home.set_security_zones_activation(False, True)

    arming without being at home
    >>> home.set_security_zones_activation(True, True)

    disarming the alarm system
    >>> home.set_security_zones_activation(False, False)
null
null
null
def set_intrusion_alert_through_smoke_detectors(self, activate: bool = True):
    data = {"intrusionAlertThroughSmokeDetectors": activate}
    return self._restCall(
        "home/security/setIntrusionAlertThroughSmokeDetectors", json.dumps(data)
    )
activate or deactivate if smoke detectors should "ring" during an alarm

Args:
    activate(bool): True will let the smoke detectors "ring" during an alarm
null
null
null
def activate_absence_with_period(self, endtime: datetime):
    data = {"endTime": endtime.strftime("%Y_%m_%d %H:%M")}
    return self._restCall(
        "home/heating/activateAbsenceWithPeriod", json.dumps(data)
    )
activates the absence mode until the given time

Args:
    endtime(datetime): the time when the absence should automatically be disabled
null
null
null
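A short usage sketch, assuming a connected home object; only the strftime format used by the code above is relied upon:

from datetime import datetime, timedelta

# suspend heating until tomorrow morning at 06:00
endtime = (datetime.now() + timedelta(days=1)).replace(hour=6, minute=0)
home.activate_absence_with_period(endtime)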
def activate_absence_with_duration(self, duration: int):
    data = {"duration": duration}
    return self._restCall(
        "home/heating/activateAbsenceWithDuration", json.dumps(data)
    )
activates the absence mode for a given time

Args:
    duration(int): the absence duration in minutes
null
null
null
def activate_vacation(self, endtime: datetime, temperature: float):
    data = {
        "endtime": endtime.strftime("%Y_%m_%d %H:%M"),
        "temperature": temperature,
    }
    return self._restCall("home/heating/activateVacation", json.dumps(data))
activates the vacation mode until the given time

Args:
    endtime(datetime): the time when the vacation mode should automatically be disabled
    temperature(float): the set temperature during the vacation mode
null
null
null
def set_pin(self, newPin: str, oldPin: str = None) -> dict:
    if newPin is None:
        newPin = ""
    data = {"pin": newPin}
    if oldPin:
        self._connection.headers["PIN"] = str(oldPin)
    result = self._restCall("home/setPin", body=json.dumps(data))
    if oldPin:
        del self._connection.headers["PIN"]
    return result
sets a new pin for the home

Args:
    newPin(str): the new pin
    oldPin(str): optional; if a pin is currently active it must be given here,
        otherwise it will not be possible to set the new pin

Returns:
    the result of the call
null
null
null
def set_timezone(self, timezone: str):
    data = {"timezoneId": timezone}
    return self._restCall("home/setTimezone", body=json.dumps(data))
sets the timezone for the AP, e.g. "Europe/Berlin"

Args:
    timezone(str): the new timezone
null
null
null
def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:
    internal = [x.id for x in internal_devices]
    external = [x.id for x in external_devices]
    data = {"zonesDeviceAssignment": {"INTERNAL": internal, "EXTERNAL": external}}
    return self._restCall(
        "home/security/setZonesDeviceAssignment", body=json.dumps(data)
    )
sets the devices for the security zones

Args:
    internal_devices(List[Device]): the devices which should be used for the internal zone
    external_devices(List[Device]): the devices which should be used for the external (hull) zone

Returns:
    the result of _restCall
null
null
null
_config = configparser.ConfigParser()
with open(config_file, "r") as fl:
    _config.read_file(fl)

logging_filename = _config.get("LOGGING", "FileName", fallback="hmip.log")
if logging_filename == "None":
    logging_filename = None

_hmip_config = HmipConfig(
    _config["AUTH"]["AuthToken"],
    _config["AUTH"]["AccessPoint"],
    int(_config.get("LOGGING", "Level", fallback=30)),
    logging_filename,
    _config._sections,
)
return _hmip_config
def load_config_file(config_file: str) -> HmipConfig
Loads the config ini file.

:raises FileNotFoundError: when the config file does not exist
3.469868
3.483513
0.996083
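For reference, a config file matching the keys read above could look as follows; the AuthToken and AccessPoint values are placeholders, and the LOGGING keys are optional because the code supplies fallbacks:

[AUTH]
AuthToken = 0000000000000000000000000000000000000000
AccessPoint = 3014F711A000000000000000

[LOGGING]
Level = 30
FileName = hmip.log

Loading it is then a one-liner: config = load_config_file("config.ini").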
return await self._connection.ws_connect(
    on_message=self._ws_on_message, on_error=self._ws_on_error
)
async def enable_events(self) -> asyncio.Task
Connects to the websocket. Returns a listening task.
5.604009
3.948884
1.419137
self.functionalChannels = []
for channel in self._rawJSONData["functionalChannels"].values():
    fc = self._parse_functionalChannel(channel, groups)
    self.functionalChannels.append(fc)

self.functionalChannelCount = Counter(
    x.functionalChannelType for x in self.functionalChannels
)
def load_functionalChannels(self, groups: Iterable[Group])
this function will load the functionalChannels into the device
4.789995
4.250328
1.126971
data = { "channelIndex": channelIndex, "deviceId": self.id, "simpleRGBColorState": rgb, "dimLevel": dimLevel, } return self._restCall( "device/control/setSimpleRGBColorDimLevel", body=json.dumps(data) )
def set_rgb_dim_level(self, channelIndex: int, rgb: RGBColorState, dimLevel: float)
sets the color and dim level of the lamp

Args:
    channelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex
        or self.bottomLightChannelIndex
    rgb(RGBColorState): the color of the lamp
    dimLevel(float): the dim level of the lamp. 0.0 = off, 1.0 = MAX

Returns:
    the result of the _restCall
5.372227
5.02806
1.068449
data = { "channelIndex": channelIndex, "deviceId": self.id, "simpleRGBColorState": rgb, "dimLevel": dimLevel, "onTime": onTime, "rampTime": rampTime, } return self._restCall( "device/control/setSimpleRGBColorDimLevelWithTime", body=json.dumps(data) )
def set_rgb_dim_level_with_time( self, channelIndex: int, rgb: RGBColorState, dimLevel: float, onTime: float, rampTime: float, )
sets the color and dim level of the lamp

Args:
    channelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex
        or self.bottomLightChannelIndex
    rgb(RGBColorState): the color of the lamp
    dimLevel(float): the dim level of the lamp. 0.0 = off, 1.0 = MAX
    onTime(float):
    rampTime(float):

Returns:
    the result of the _restCall
3.605267
3.478683
1.036388
data = {"channelIndex": 1, "deviceId": self.id, "shutterLevel": level} return self._restCall("device/control/setShutterLevel", body=json.dumps(data))
def set_shutter_level(self, level=0.0)
sets the shutter level

Args:
    level(float): the new level of the shutter. 0.0 = open, 1.0 = closed

Returns:
    the result of the _restCall
6.767936
6.249531
1.082951
if shutterLevel is None:
    shutterLevel = self.shutterLevel

data = {
    "channelIndex": 1,
    "deviceId": self.id,
    "slatsLevel": slatsLevel,
    "shutterLevel": shutterLevel,
}
return self._restCall("device/control/setSlatsLevel", json.dumps(data))
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None)
sets the slats and shutter level

Args:
    slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed
    shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed,
        None = use the current value

Returns:
    the result of the _restCall
3.211924
3.058648
1.050112
def set_label(self, label):
    data = {"ruleId": self.id, "label": label}
    return self._restCall("rule/setRuleLabel", json.dumps(data))
sets the label of the rule
null
null
null
def set_rule_enabled_state(self, enabled):
    data = {"ruleId": self.id, "enabled": enabled}
    return self._restCall("rule/enableSimpleRule", json.dumps(data))
enables/disables this rule
null
null
null
async def api_call(self, path, body=None, full_url=False):
    result = None
    if not full_url:
        path = self.full_url(path)
    for i in range(self._restCallRequestCounter):
        try:
            with async_timeout.timeout(self._restCallTimout, loop=self._loop):
                result = await self._websession.post(
                    path, data=body, headers=self.headers
                )
                if result.status == 200:
                    if result.content_type == "application/json":
                        ret = await result.json()
                    else:
                        ret = True
                    return ret
                else:
                    raise HmipWrongHttpStatusError
        except (asyncio.TimeoutError, aiohttp.ClientConnectionError):
            # Both exceptions occur when connecting to the server does
            # somehow not work.
            logger.debug("Connection timed out or another error occurred %s" % path)
        except JSONDecodeError as err:
            logger.exception(err)
        finally:
            if result is not None:
                await result.release()
    raise HmipConnectionError("Failed to connect to HomeMaticIp server")
Make the actual call to the HMIP server.

Throws `HmipWrongHttpStatusError` or `HmipConnectionError` if the connection
has failed or the response is not correct.
null
null
null
data = {
    'accountType': 'HOSTED_OR_GOOGLE',
    'Email': email,
    'has_permission': 1,
    'add_account': 1,
    'EncryptedPasswd': google.signature(email, password, android_key_7_3_29),
    'service': service,
    'source': 'android',
    'androidId': android_id,
    'device_country': device_country,
    'operatorCountry': device_country,
    'lang': lang,
    'sdk_version': sdk_version,
}
return _perform_auth_request(data)
def perform_master_login(email, password, android_id, service='ac2dm', device_country='us', operatorCountry='us', lang='en', sdk_version=17)
Perform a master login, which is what Android does when you first add a Google account.

Return a dict, eg::

    {
        'Auth': '...',
        'Email': 'email@gmail.com',
        'GooglePlusUpgrade': '1',
        'LSID': '...',
        'PicasaUser': 'My Name',
        'RopRevision': '1',
        'RopText': ' ',
        'SID': '...',
        'Token': 'oauth2rt_1/...',
        'firstName': 'My',
        'lastName': 'Name',
        'services': 'hist,mail,googleme,...'
    }
5.01345
5.235162
0.957649
data = {
    'accountType': 'HOSTED_OR_GOOGLE',
    'Email': email,
    'has_permission': 1,
    'EncryptedPasswd': master_token,
    'service': service,
    'source': 'android',
    'androidId': android_id,
    'app': app,
    'client_sig': client_sig,
    'device_country': device_country,
    'operatorCountry': device_country,
    'lang': lang,
    'sdk_version': sdk_version
}
return _perform_auth_request(data)
def perform_oauth(email, master_token, android_id, service, app, client_sig, device_country='us', operatorCountry='us', lang='en', sdk_version=17)
Use a master token from master_login to perform OAuth to a specific Google service.

Return a dict, eg::

    {
        'Auth': '...',
        'LSID': '...',
        'SID': '..',
        'issueAdvice': 'auto',
        'services': 'hist,mail,googleme,...'
    }

To authenticate requests to this service, include a header
``Authorization: GoogleLogin auth=res['Auth']``.
3.032979
3.065462
0.989403
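The two calls are designed to be chained: the 'Token' returned by perform_master_login feeds perform_oauth as the master token. A sketch with placeholder values (the android_id, service, app, and client_sig shown here are illustrative only):

master = perform_master_login('user@gmail.com', 'my-password', '0123456789abcdef')
res = perform_oauth(
    'user@gmail.com', master['Token'], '0123456789abcdef',
    service='sj', app='com.example.app',
    client_sig='0000000000000000000000000000000000000000',
)
headers = {'Authorization': 'GoogleLogin auth=' + res['Auth']}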
'''Test if n is a prime number

   n - the integer to test
   rnd - the random number generator to use for the probabilistic primality algorithms,
   k - the number of iterations to use for the probabilistic primality algorithms,
   algorithm - the primality algorithm to use, default is Miller-Rabin. The
       gmpy implementation is used if gmpy is installed.

   Return value: True if n seems prime, False otherwise.
'''
if algorithm is None:
    algorithm = PRIME_ALGO
if algorithm == 'gmpy-miller-rabin':
    if not gmpy:
        raise NotImplementedError
    return gmpy.is_prime(n, k)
elif algorithm == 'miller-rabin':
    # miller rabin probability of primality is 1/4**k
    return miller_rabin(n, k, rnd=rnd)
elif algorithm == 'solovay-strassen':
    # for jacobi it's 1/2**k
    return randomized_primality_testing(n, rnd=rnd, k=k*2)
else:
    raise NotImplementedError
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION, algorithm=None)
Test if n is a prime number

n - the integer to test
rnd - the random number generator to use for the probabilistic primality algorithms,
k - the number of iterations to use for the probabilistic primality algorithms,
algorithm - the primality algorithm to use, default is Miller-Rabin. The gmpy
    implementation is used if gmpy is installed.

Return value: True if n seems prime, False otherwise.
4.333457
2.289686
1.892599
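A doctest-style sanity check, assuming the default pure-python Miller-Rabin backend (with gmpy installed the return type may differ); 561 is a Carmichael number, which fools the plain Fermat test but is rejected here with overwhelming probability:

>>> is_prime(97)
True
>>> is_prime(100)
False
>>> is_prime(561)   # Carmichael number: 3 * 11 * 17
False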
'''Generate a prime number of the given size using the is_prime()
   helper function.

   size - size in bits of the prime, default to 128
   rnd - a random generator to use
   k - the number of iterations to use for the probabilistic primality algorithms.
   algorithm - the name of the primality algorithm to use, default is the
       probabilistic Miller-Rabin algorithm.

   Return value: a prime number, as a long integer
'''
while True:
    n = rnd.getrandbits(size-2)
    # force the top bit and make the candidate odd
    n = 2 ** (size-1) + n * 2 + 1
    if is_prime(n, rnd=rnd, k=k, algorithm=algorithm):
        return n
    if algorithm == 'gmpy-miller-rabin':
        # with gmpy available, jump directly to the next prime
        return gmpy.next_prime(n)
def get_prime(size=128, rnd=default_crypto_random, k=DEFAULT_ITERATION, algorithm=None)
Generate a prime number of the given size using the is_prime() helper function.

size - size in bits of the prime, default to 128
rnd - a random generator to use
k - the number of iterations to use for the probabilistic primality algorithms.
algorithm - the name of the primality algorithm to use, default is the
    probabilistic Miller-Rabin algorithm.

Return value: a prime number, as a long integer
4.686029
1.907022
2.45725
'''Calculates the value of the Jacobi symbol (a/b) where both a and b are
   positive integers, and b is odd

   :returns: -1, 0 or 1
'''
assert a > 0
assert b > 0
if a == 0:
    return 0
result = 1
while a > 1:
    if a & 1:
        if ((a-1)*(b-1) >> 2) & 1:
            result = -result
        a, b = b % a, a
    else:
        if (((b * b) - 1) >> 3) & 1:
            result = -result
        a >>= 1
    if a == 0:
        return 0
return result
def jacobi(a, b)
Calculates the value of the Jacobi symbol (a/b) where both a and b are
positive integers, and b is odd

:returns: -1, 0 or 1
3.4494
2.599787
1.326801
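A few hand-checkable values; for example (2/15) = (2/3)(2/5) = (-1)(-1) = 1:

>>> jacobi(2, 15)
1
>>> jacobi(2, 3)
-1
>>> jacobi(3, 9)    # a shared factor makes the symbol 0
0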
'''Returns False if n is an Euler pseudo-prime with base x, and
   True otherwise.
'''
j = jacobi(x, n) % n
f = pow(x, n >> 1, n)
return j != f
def jacobi_witness(x, n)
Returns False if n is an Euler pseudo-prime with base x, and True otherwise.
9.947401
3.664803
2.714307
''' Pure python implementation of the Miller-Rabin algorithm.

    n - the integer number to test,
    k - the number of iterations; the probability that a composite n passes
        all k rounds is at most 1/4**k,
    rnd - a random generator
'''
s = 0
d = n-1
# Find nearest power of 2
s = primitives.integer_bit_size(n)
# Find greatest factor which is a power of 2
s = fractions.gcd(2**s, n-1)
d = (n-1) // s
s = primitives.integer_bit_size(s) - 1
while k:
    k = k - 1
    a = rnd.randint(2, n-2)
    x = pow(a, d, n)
    if x == 1 or x == n - 1:
        continue
    # up to s-1 further squarings, looking for a**(2**r * d) == n-1
    for r in range(1, s):
        x = pow(x, 2, n)
        if x == 1:
            return False
        if x == n - 1:
            break
    else:
        return False
return True
def miller_rabin(n, k, rnd=default_pseudo_random)
Pure python implementation of the Miller-Rabin algorithm.

n - the integer number to test,
k - the number of iterations; the probability that a composite n passes
    all k rounds is at most 1/4**k,
rnd - a random generator
3.459822
2.404603
1.438833
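A quick sanity check; 7919 is the 1000th prime and 7917 = 3 * 7 * 13 * 29, so with k=20 rounds the outcomes below are near-certain:

>>> miller_rabin(7919, 20)
True
>>> miller_rabin(7917, 20)
False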
# source: http://stackoverflow.com/a/14527004/1231454
if lnum == 0:
    return b'\0' * padmultiple
elif lnum < 0:
    raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
    s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
    filled_so_far = len(s) % padmultiple
    if filled_so_far != 0:
        s = b'\0' * (padmultiple - filled_so_far) + s
return s
def long_to_bytes(lnum, padmultiple=1)
Packs lnum (which must be convertible to a long) into a byte string
zero-padded to a multiple of padmultiple bytes in size. 0 means no padding
whatsoever, so that packing 0 results in an empty string. The resulting byte
string is the big-endian representation of the passed-in non-negative long.
2.602492
2.57166
1.011989
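Worked examples: 65537 is 0x010001, three bytes, so padding to a multiple of four prepends one zero byte:

>>> long_to_bytes(65537, 4)
b'\x00\x01\x00\x01'
>>> long_to_bytes(0)
b'\x00'
>>> long_to_bytes(0, 0)
b''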
track = None
# Get a list of (track, artist match ratio, name match ratio)
tracks_with_match_ratio = [(
    track,
    get_similarity(target_track.artist, track.artist),
    get_similarity(target_track.name, track.name),
) for track in tracks]
# Sort by artist then by title
sorted_tracks = sorted(
    tracks_with_match_ratio,
    key=lambda t: (t[1], t[2]),
    reverse=True  # Descending, highest match ratio first
)
if sorted_tracks:
    track = sorted_tracks[0][0]  # Closest match to query
return track
def find_closest_match(target_track, tracks)
Return closest match to target track
3.064604
2.984022
1.027005
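A self-contained sketch of the pieces this function relies on; the Track record and the difflib-based get_similarity() below are assumptions inferred from the call sites, not the module's actual definitions:

from collections import namedtuple
from difflib import SequenceMatcher

Track = namedtuple('Track', 'artist name')

def get_similarity(a, b):
    # assumed helper: string similarity in the range 0.0..1.0
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()

target = Track('Daft Punk', 'One More Time')
candidates = [
    Track('Daft Punk', 'One More Time (Radio Edit)'),
    Track('Daft Punk', 'Aerodynamic'),
]
best = find_closest_match(target, candidates)  # picks the radio edit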
''' Mask Generation Function v1 from the PKCS#1 v2.0 standard.

    mgf_seed - the seed, a byte string
    mask_len - the length of the mask to generate
    hash_class - the digest algorithm to use, default is SHA1

    Return value: a pseudo-random mask, as a byte string
'''
h_len = hash_class().digest_size
if mask_len > 0x10000:
    raise ValueError('mask too long')
T = b''
for i in range(0, integer_ceil(mask_len, h_len)):
    C = i2osp(i, 4)
    T = T + hash_class(mgf_seed + C).digest()
return T[:mask_len]
def mgf1(mgf_seed, mask_len, hash_class=hashlib.sha1)
Mask Generation Function v1 from the PKCS#1 v2.0 standard.

mgf_seed - the seed, a byte string
mask_len - the length of the mask to generate
hash_class - the digest algorithm to use, default is SHA1

Return value: a pseudo-random mask, as a byte string
4.164753
2.341384
1.778757
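The mask is fully determined by the seed, which a property-style check illustrates:

>>> mask = mgf1(b'seed', 32)
>>> len(mask)
32
>>> mgf1(b'seed', 32) == mask
True
>>> mgf1(b'other seed', 32) == mask
False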
'''Return the ceiling of a divided by b.'''
quanta, mod = divmod(a, b)
if mod:
    quanta += 1
return quanta
def integer_ceil(a, b)
Return the ceiling of a divided by b.
6.949839
4.928089
1.41025
'''Returns the number of bytes necessary to store the integer n.'''
quanta, mod = divmod(integer_bit_size(n), 8)
if mod or n == 0:
    quanta += 1
return quanta
def integer_byte_size(n)
Returns the number of bytes necessary to store the integer n.
4.805728
4.468213
1.075537