code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
names = [] for name in dir(module_or_class): spec_opts = self.parse_hookspec_opts(module_or_class, name) if spec_opts is not None: hc = getattr(self.hook, name, None) if hc is None: hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts) setattr(self.hook, name, hc) else: # plugins registered this hook without knowing the spec hc.set_specification(module_or_class, spec_opts) for hookfunction in hc.get_hookimpls(): self._verify_hook(hc, hookfunction) names.append(name) if not names: raise ValueError( "did not find any %r hooks in %r" % (self.project_name, module_or_class) )
def add_hookspecs(self, module_or_class)
add new hook specifications defined in the given module_or_class. Functions are recognized if they have been decorated accordingly.
4.054222
4.032993
1.005264
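The pluggy records above and below are easier to follow with a small end-to-end usage sketch. This is not taken from the dataset: the project name "demo", the Spec/Plugin classes and the greet hook are made-up illustrations, and the snippet assumes the pluggy package is installed.

import pluggy

hookspec = pluggy.HookspecMarker("demo")
hookimpl = pluggy.HookimplMarker("demo")

class Spec:
    @hookspec
    def greet(self, name):
        """Hook specification: receives a name, returns a greeting."""

class Plugin:
    @hookimpl
    def greet(self, name):
        return "hello " + name

pm = pluggy.PluginManager("demo")
pm.add_hookspecs(Spec)        # the add_hookspecs call documented above
pm.register(Plugin())
print(pm.hook.greet(name="world"))  # ['hello world']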
for name, val in self._name2plugin.items(): if plugin == val: return name
def get_name(self, plugin)
Return name for registered plugin or None if not registered.
5.871395
4.673631
1.256281
for name in self.hook.__dict__: if name[0] != "_": hook = getattr(self.hook, name) if not hook.has_spec(): for hookimpl in hook.get_hookimpls(): if not hookimpl.optionalhook: raise PluginValidationError( hookimpl.plugin, "unknown hook %r in plugin %r" % (name, hookimpl.plugin), )
def check_pending(self)
Verify that all hooks which have not been verified against a hook specification are optional, otherwise raise PluginValidationError
5.544218
3.698186
1.499172
from pkg_resources import ( iter_entry_points, DistributionNotFound, VersionConflict, ) count = 0 for ep in iter_entry_points(group, name=name): # is the plugin registered or blocked? if self.get_plugin(ep.name) or self.is_blocked(ep.name): continue try: plugin = ep.load() except DistributionNotFound: continue except VersionConflict as e: raise PluginValidationError( plugin=None, message="Plugin %r could not be loaded: %s!" % (ep.name, e), ) self.register(plugin, name=ep.name) self._plugin_distinfo.append((plugin, ep.dist)) count += 1 return count
def load_setuptools_entrypoints(self, group, name=None)
Load modules from querying the specified setuptools ``group``. :param str group: entry point group to load plugins :param str name: if given, loads only plugins with the given ``name``. :rtype: int :return: the number of plugins loaded by this call.
3.115925
3.133563
0.994371
return _tracing._TracedHookExecution(self, before, after).undo
def add_hookcall_monitoring(self, before, after)
add before/after tracing functions for all hooks and return an undo function which, when called, will remove the added tracers. ``before(hook_name, hook_impls, kwargs)`` will be called ahead of all hook calls and receive a hookcaller instance, a list of HookImpl instances and the keyword arguments for the hook call. ``after(outcome, hook_name, hook_impls, kwargs)`` receives the same arguments as ``before`` but also a :py:class:`_Result` object which represents the result of the overall hook call.
59.385929
59.762466
0.993699
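As a rough illustration of the add_hookcall_monitoring record, the sketch below wires two tracer callbacks into a throwaway plugin manager. The hook name ping and the plugin classes are hypothetical; the snippet assumes pluggy is installed.

import pluggy

hookspec = pluggy.HookspecMarker("demo")
hookimpl = pluggy.HookimplMarker("demo")

class Spec:
    @hookspec
    def ping(self, value):
        """Hook specification used only for this demo."""

class Plugin:
    @hookimpl
    def ping(self, value):
        return value + 1

pm = pluggy.PluginManager("demo")
pm.add_hookspecs(Spec)
pm.register(Plugin())

def before(hook_name, hook_impls, kwargs):
    print("calling", hook_name, kwargs)

def after(outcome, hook_name, hook_impls, kwargs):
    print("finished", hook_name, "->", outcome.get_result())

undo = pm.add_hookcall_monitoring(before, after)
pm.hook.ping(value=41)  # traced: prints the before/after lines
undo()                  # removes the tracers again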
hooktrace = self.hook._trace def before(hook_name, methods, kwargs): hooktrace.root.indent += 1 hooktrace(hook_name, kwargs) def after(outcome, hook_name, methods, kwargs): if outcome.excinfo is None: hooktrace("finish", hook_name, "-->", outcome.get_result()) hooktrace.root.indent -= 1 return self.add_hookcall_monitoring(before, after)
def enable_tracing(self)
enable tracing of hook calls and return an undo function.
6.90816
6.274702
1.100954
orig = getattr(self.hook, name) plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)] if plugins_to_remove: hc = _HookCaller( orig.name, orig._hookexec, orig.spec.namespace, orig.spec.opts ) for hookimpl in orig.get_hookimpls(): plugin = hookimpl.plugin if plugin not in plugins_to_remove: hc._add_hookimpl(hookimpl) # we also keep track of this hook caller so it # gets properly removed on plugin unregistration self._plugin2hookcallers.setdefault(plugin, []).append(hc) return hc return orig
def subset_hook_caller(self, name, remove_plugins)
Return a new _HookCaller instance for the named method which manages calls to all registered plugins except the ones from remove_plugins.
4.980149
4.602786
1.081986
cache = getattr(func, "__dict__", {}) try: return cache["_varnames"] except KeyError: pass if inspect.isclass(func): try: func = func.__init__ except AttributeError: return (), () elif not inspect.isroutine(func): # callable object? try: func = getattr(func, "__call__", func) except Exception: return () try: # func MUST be a function or method here or we won't parse any args spec = _getargspec(func) except TypeError: return (), () args, defaults = tuple(spec.args), spec.defaults if defaults: index = -len(defaults) args, defaults = args[:index], tuple(args[index:]) else: defaults = () # strip any implicit instance arg # pypy3 uses "obj" instead of "self" for default dunder methods implicit_names = ("self",) if not _PYPY3 else ("self", "obj") if args: if inspect.ismethod(func) or ( "." in getattr(func, "__qualname__", ()) and args[0] in implicit_names ): args = args[1:] try: cache["_varnames"] = args, defaults except TypeError: pass return args, defaults
def varnames(func)
Return tuple of positional and keyword argument names for a function, method, class or callable. In case of a class, its ``__init__`` method is considered. For methods the ``self`` parameter is not included.
3.838364
3.868297
0.992262
if hookimpl.hookwrapper: methods = self._wrappers else: methods = self._nonwrappers if hookimpl.trylast: methods.insert(0, hookimpl) elif hookimpl.tryfirst: methods.append(hookimpl) else: # find last non-tryfirst method i = len(methods) - 1 while i >= 0 and methods[i].tryfirst: i -= 1 methods.insert(i + 1, hookimpl) if "__multicall__" in hookimpl.argnames: warnings.warn( "Support for __multicall__ is now deprecated and will be " "removed in an upcoming release.", DeprecationWarning, ) self.multicall = _legacymulticall
def _add_hookimpl(self, hookimpl)
Add an implementation to the callback chain.
3.177096
3.097298
1.025764
if proc is not None: warnings.warn( "Support for `proc` argument is now deprecated and will be " "removed in an upcoming release.", DeprecationWarning, ) result_callback = proc self._call_history.append((kwargs or {}, result_callback)) # historizing hooks don't return results res = self._hookexec(self, self.get_hookimpls(), kwargs) if result_callback is None: return # XXX: remember firstresult isn't compat with historic for x in res or []: result_callback(x)
def call_historic(self, result_callback=None, kwargs=None, proc=None)
Call the hook with given ``kwargs`` for all registered plugins and for all plugins which will be registered afterwards. If ``result_callback`` is not ``None`` it will be called for each non-None result obtained from a hook implementation. .. note:: The ``proc`` argument is now deprecated.
7.08095
6.588907
1.074677
old = list(self._nonwrappers), list(self._wrappers) for method in methods: opts = dict(hookwrapper=False, trylast=False, tryfirst=False) hookimpl = HookImpl(None, "<temp>", method, opts) self._add_hookimpl(hookimpl) try: return self(**kwargs) finally: self._nonwrappers, self._wrappers = old
def call_extra(self, methods, kwargs)
Call the hook with some additional temporarily participating methods using the specified kwargs as call parameters.
6.164586
5.544149
1.111908
if self.is_historic(): for kwargs, result_callback in self._call_history: res = self._hookexec(self, [method], kwargs) if res and result_callback is not None: result_callback(res[0])
def _maybe_apply_history(self, method)
Apply call history to a new hookimpl if it is marked as historic.
7.596949
5.697557
1.333369
if logger is None: logger = logging.getLogger() for i, orig_handler in enumerate(list(logger.handlers)): handler = MultiProcessingHandler( 'mp-handler-{0}'.format(i), sub_handler=orig_handler) logger.removeHandler(orig_handler) logger.addHandler(handler)
def install_mp_handler(logger=None)
Wraps the handlers in the given Logger with a MultiProcessingHandler. :param logger: the logger whose handlers to wrap. By default, the root logger.
3.645792
3.505634
1.039981
if new_version_number is None: raise ValueError import fileinput import sys file = join('timezonefinder', '__init__.py') for line in fileinput.input(file, inplace=1): if old_version_number in line: line = line.replace(old_version_number, new_version_number) sys.stdout.write(line)
def set_version(new_version_number=None, old_version_number='')
Set package version as listed in `__version__` in `__init__.py`.
3.149275
2.815653
1.118488
''' :param tz_name: one of the names in timezone_names :param tz_id: the id of the timezone (=index in timezone_names) :param use_id: determines whether id or name should be used :param coords_as_pairs: determines the structure of the polygon representation :return: a data structure representing the multipolygon of this timezone output format: [ [polygon1, hole1, hole2...], [polygon2, ...], ...] and each polygon and hole is itself formatted like: ([longitudes], [latitudes]) or [(lng1,lat1), (lng2,lat2),...] if ``coords_as_pairs=True``. ''' if use_id: zone_id = tz_id else: try: zone_id = timezone_names.index(tz_name) except ValueError: raise ValueError("The timezone '%s' does not exist." % tz_name) self.poly_nr2zone_id.seek(NR_BYTES_H * zone_id) # read poly_nr of the first polygon of that zone first_polygon_nr = unpack(DTYPE_FORMAT_H, self.poly_nr2zone_id.read(NR_BYTES_H))[0] # read poly_nr of the first polygon of the next zone last_polygon_nr = unpack(DTYPE_FORMAT_H, self.poly_nr2zone_id.read(NR_BYTES_H))[0] poly_nrs = list(range(first_polygon_nr, last_polygon_nr)) return [self.get_polygon(poly_nr, coords_as_pairs) for poly_nr in poly_nrs]
def get_geometry(self, tz_name='', tz_id=0, use_id=False, coords_as_pairs=False)
:param tz_name: one of the names in timezone_names :param tz_id: the id of the timezone (=index in timezone_names) :param use_id: determines whether id or name should be used :param coords_as_pairs: determines the structure of the polygon representation :return: a data structure representing the multipolygon of this timezone output format: [ [polygon1, hole1, hole2...], [polygon2, ...], ...] and each polygon and hole is itself formatted like: ([longitudes], [latitudes]) or [(lng1,lat1), (lng2,lat2),...] if ``coords_as_pairs=True``.
3.48847
1.877052
1.858484
zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) for pointer_local, polygon_id in enumerate(polygon_id_list): zone_id = self.id_of(polygon_id) zone_id_list[pointer_local] = zone_id return zone_id_list
def id_list(self, polygon_id_list, nr_of_polygons)
:param polygon_id_list: :param nr_of_polygons: length of polygon_id_list :return: the list of zone_ids belonging to the given polygon ids
4.07109
4.274422
0.952431
# TODO functional def all_equal(iterable): x = None for x in iterable: # first_val = x break for y in iterable: if x != y: return False return True zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) counted_zones = {} for pointer_local, polygon_id in enumerate(polygon_id_list): zone_id = self.id_of(polygon_id) zone_id_list[pointer_local] = zone_id try: counted_zones[zone_id] += 1 except KeyError: counted_zones[zone_id] = 1 if len(counted_zones) == 1: # there is only one zone. no sorting needed. return polygon_id_list, zone_id_list, True if all_equal(list(counted_zones.values())): # all the zones have the same amount of polygons. no sorting needed. return polygon_id_list, zone_id_list, False counted_zones_sorted = sorted(list(counted_zones.items()), key=lambda zone: zone[1]) sorted_polygon_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) sorted_zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) pointer_output = 0 for zone_id, amount in counted_zones_sorted: # write all polygons from this zone in the new list pointer_local = 0 detected_polygons = 0 while detected_polygons < amount: if zone_id_list[pointer_local] == zone_id: # the polygon at the pointer has the wanted zone_id detected_polygons += 1 sorted_polygon_id_list[pointer_output] = polygon_id_list[pointer_local] sorted_zone_id_list[pointer_output] = zone_id pointer_output += 1 pointer_local += 1 return sorted_polygon_id_list, sorted_zone_id_list, False
def compile_id_list(self, polygon_id_list, nr_of_polygons)
sorts the polygon_id_list from least to most occurrences of the zone ids (-> speed up). Only 4.8% of all shortcuts include polygons from more than one zone, and only for about 0.4% would sorting be beneficial (zones have different frequencies). In most of those cases there are only two types of zones (= entries in counted_zones) and one of them has only one entry. The polygon list of every single shortcut is already sorted (during compilation of the binary files). Sorting should be used for closest_timezone_at(), because only in that use case are the polygon lists quite long (multiple shortcuts are being checked simultaneously). :param polygon_id_list: :param nr_of_polygons: length of polygon_id_list :return: sorted list of polygon_ids, sorted list of zone_ids, boolean: do all entries belong to the same zone
2.332495
2.220043
1.050653
lng, lat = rectify_coordinates(lng, lat) # x = longitude y = latitude both converted to 8byte int x = coord2int(lng) y = coord2int(lat) shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat) self.shortcuts_unique_id.seek( (180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y)) try: # if there is just one possible zone in this shortcut instantly return its name return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]] except IndexError: possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y) nr_possible_polygons = len(possible_polygons) if nr_possible_polygons == 0: return None if nr_possible_polygons == 1: # there is only one polygon in that area. return its timezone name without further checks return timezone_names[self.id_of(possible_polygons[0])] # create a list of all the timezone ids of all possible polygons ids = self.id_list(possible_polygons, nr_possible_polygons) # check until the point is included in one of the possible polygons for i in range(nr_possible_polygons): # when including the current polygon only polygons from the same zone remain, same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids) if same_element != -1: # return the name of that zone return timezone_names[same_element] polygon_nr = possible_polygons[i] # get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min) self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr) boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4) # only run the expensive algorithm if the point is withing the boundaries if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]): outside_all_holes = True # when the point is within a hole of the polygon, this timezone must not be returned for hole_coordinates in self._holes_of_line(polygon_nr): if inside_polygon(x, y, hole_coordinates): outside_all_holes = False break if outside_all_holes: if inside_polygon(x, y, self.coords_of(line=polygon_nr)): # the point is included in this polygon. return its timezone name without further checks return timezone_names[ids[i]] # the timezone name of the last polygon should always be returned # if no other polygon has been matched beforehand. raise ValueError('BUG: this statement should never be reached. Please open up an issue on Github!')
def timezone_at(self, *, lng, lat)
this function looks up in which polygons the point could be included. To speed things up, shortcuts are being used (stored in a binary file). Especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made; even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby). If you want to make sure a point is really inside a timezone use 'certain_timezone_at'. :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None
5.025363
4.820465
1.042506
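A minimal usage sketch for the timezone_at record above, assuming the timezonefinder package (from which these records appear to come) is installed; the Berlin coordinates are only an example.

from timezonefinder import TimezoneFinder

tf = TimezoneFinder()
# keyword-only lng/lat, as in the signature above
print(tf.timezone_at(lng=13.41, lat=52.52))  # expected: 'Europe/Berlin'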
lng, lat = rectify_coordinates(lng, lat) shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat) possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y) # x = longitude y = latitude both converted to 8byte int x = coord2int(lng) y = coord2int(lat) # check if the point is actually included in one of the polygons for polygon_nr in possible_polygons: # get boundaries self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr) boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4) if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]): outside_all_holes = True # when the point is within a hole of the polygon this timezone doesn't need to be checked for hole_coordinates in self._holes_of_line(polygon_nr): if inside_polygon(x, y, hole_coordinates): outside_all_holes = False break if outside_all_holes: if inside_polygon(x, y, self.coords_of(line=polygon_nr)): return timezone_names[self.id_of(polygon_nr)] # no polygon has been matched return None
def certain_timezone_at(self, *, lng, lat)
this function looks up in which polygon the point certainly is included. This is much slower than 'timezone_at'! :param lng: longitude of the point in degree :param lat: latitude in degree :return: the timezone name of the polygon the point is included in or None
5.2565
4.974278
1.056736
from pytz import timezone import pytz from datetime import datetime utc = pytz.utc today = datetime.now() tz_target = timezone(tf.certain_timezone_at(lat=target['lat'], lng=target['lng'])) # ATTENTION: tz_target could be None! handle error case today_target = tz_target.localize(today) today_utc = utc.localize(today) return (today_utc - today_target).total_seconds() / 60
def get_offset(target)
returns a location's time zone offset from UTC in minutes.
4.636375
4.180226
1.109121
contained = False # the edge from the last to the first point is checked first i = -1 y1 = coordinates[1][-1] y_gt_y1 = y > y1 for y2 in coordinates[1]: y_gt_y2 = y > y2 if y_gt_y1: if not y_gt_y2: x1 = coordinates[0][i] x2 = coordinates[0][i + 1] # only crossings "right" of the point should be counted x1GEx = x <= x1 x2GEx = x <= x2 # compare the slope of the line [p1-p2] and [p-p2] # depending on the position of p2 this determines whether the polygon edge is right or left of the point # to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side # ( dy/dx > a == dy > a * dx ) # int64 accuracy needed here! if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) <= ( int64(y2) - int64(y1)) * (int64(x2) - int64(x))): contained = not contained else: if y_gt_y2: x1 = coordinates[0][i] x2 = coordinates[0][i + 1] # only crossings "right" of the point should be counted x1GEx = x <= x1 x2GEx = x <= x2 if (x1GEx and x2GEx) or ((x1GEx or x2GEx) and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) >= ( int64(y2) - int64(y1)) * (int64(x2) - int64(x))): contained = not contained y1 = y2 y_gt_y1 = y_gt_y2 i += 1 return contained
def inside_polygon(x, y, coordinates)
Implementing the ray casting point in polygon test algorithm cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm :param x: :param y: :param coordinates: a polygon represented by a list containing two lists (x and y coordinates): [ [x1,x2,x3...], [y1,y2,y3...]] those lists are actually numpy arrays which are being read directly from a binary file :return: true if the point (x,y) lies within the polygon Some overflow considerations for the critical part of comparing the line segment slopes: (y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max (y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max delta_y_max * delta_x_max = 180 * 360 < 65 x10^3 Instead of calculating with float I decided to use just ints (by multiplying with 10^7). That gives us: delta_y_max * delta_x_max = 180x10^7 * 360x10^7 delta_y_max * delta_x_max <= 65x10^17 So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never occur in practice (timezone polygons do not span the whole lng lat coordinate space), 32bit accuracy hence is not safe to use here! Python 2.2 automatically uses the appropriate int data type preventing overflow (cf. https://www.python.org/dev/peps/pep-0237/), but here the data types are numpy internal static data types. The data is stored as int32 -> use int64 when comparing slopes!
2.932951
2.884616
1.016756
element = id_list[pointer] pointer += 1 while pointer < length: if element != id_list[pointer]: return -1 pointer += 1 return element
def all_the_same(pointer, length, id_list)
:param pointer: starting from that element the list is being checked for equality of its elements :param length: :param id_list: List mustn't be empty or None. There has to be at least one element :return: returns the first encountered element if starting from the pointer all elements are the same, otherwise it returns -1
2.773576
2.347065
1.181721
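Because all_the_same is small and self-contained, its contract is easiest to see from a standalone copy with a usage example (plain lists stand in for the numpy arrays used in the library):

def all_the_same(pointer, length, id_list):
    # return the element at `pointer` if every element from there on is equal, else -1
    element = id_list[pointer]
    pointer += 1
    while pointer < length:
        if element != id_list[pointer]:
            return -1
        pointer += 1
    return element

ids = [3, 7, 7, 7]
print(all_the_same(1, len(ids), ids))  # 7  (all equal from index 1 on)
print(all_the_same(0, len(ids), ids))  # -1 (3 != 7)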
# 2* for the distance in rad and * 12742 (mean diameter of earth) for the distance in km return 12742 * asin(sqrt(((sin(lat_rad / 2)) ** 2 + cos(lat_rad) * (sin((lng_rad - lng_rad_p1) / 2)) ** 2)))
def distance_to_point_on_equator(lng_rad, lat_rad, lng_rad_p1)
uses the simplified haversine formula for this special case (lat_p1 = 0) :param lng_rad: the longitude of the point in radians :param lat_rad: the latitude of the point in radians :param lng_rad_p1: the longitude of point 1 on the equator (lat=0) :return: distance between the point and p1 (lng_rad_p1,0) in km this is only an approximation since the earth is not a real sphere
5.630165
5.157551
1.091635
# 2* for the distance in rad and * 12742(mean diameter of earth) for the distance in km return 12742 * asin( sqrt(((sin((lat_p1 - lat_p2) / 2)) ** 2 + cos(lat_p2) * cos(lat_p1) * (sin((lng_p1 - lng_p2) / 2)) ** 2)))
def haversine(lng_p1, lat_p1, lng_p2, lat_p2)
:param lng_p1: the longitude of point 1 in radians :param lat_p1: the latitude of point 1 in radians :param lng_p2: the longitude of point 2 in radians :param lat_p2: the latitude of point 2 in radians :return: distance between p1 and p2 in km this is only an approximation since the earth is not a real sphere
3.744845
3.649348
1.026168
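The haversine record translates directly into a runnable sketch. The Berlin/Munich coordinates are only an example, and 12742 km is the mean diameter of the earth used by the record above.

from math import asin, cos, radians, sin, sqrt

def haversine(lng_p1, lat_p1, lng_p2, lat_p2):
    # great-circle distance in km between two points given in radians
    return 12742 * asin(sqrt(
        sin((lat_p1 - lat_p2) / 2) ** 2
        + cos(lat_p2) * cos(lat_p1) * sin((lng_p1 - lng_p2) / 2) ** 2))

# Berlin -> Munich, roughly 500 km
print(haversine(radians(13.405), radians(52.520), radians(11.582), radians(48.135)))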
# rotate coordinate system (= all the points) so that p0 would have lat_rad=lng_rad=0 (=origin) # z rotation is simply subtracting the lng_rad # convert the points to the cartesian coordinate system px_cartesian = coords2cartesian(lng_rad - p0_lng, lat_rad) p1_cartesian = coords2cartesian(p1_lng - p0_lng, p1_lat) pm1_cartesian = coords2cartesian(pm1_lng - p0_lng, pm1_lat) px_cartesian = y_rotate(p0_lat, px_cartesian) p1_cartesian = y_rotate(p0_lat, p1_cartesian) pm1_cartesian = y_rotate(p0_lat, pm1_cartesian) # for both p1 and pm1 separately do: # rotate coordinate system so that this point also has lat_p1_rad=0 and lng_p1_rad>0 (p0 does not change!) rotation_rad = atan2(p1_cartesian[2], p1_cartesian[1]) p1_cartesian = x_rotate(rotation_rad, p1_cartesian) lng_p1_rad = atan2(p1_cartesian[1], p1_cartesian[0]) px_retrans_rad = cartesian2rad(*x_rotate(rotation_rad, px_cartesian)) # if lng_rad of px is between 0 (<-point1) and lng_rad of point 2: # the distance between point x and the 'equator' is the shortest # if the point is not between p0 and p1 the distance to the closest of the two points should be used # so clamp/clip the lng_rad of px to the interval of [0; lng_rad p1] and compute the distance with it temp_distance = distance_to_point_on_equator(px_retrans_rad[0], px_retrans_rad[1], max(min(px_retrans_rad[0], lng_p1_rad), 0)) # ATTENTION: vars are being reused. p1 is actually pm1 here! rotation_rad = atan2(pm1_cartesian[2], pm1_cartesian[1]) p1_cartesian = x_rotate(rotation_rad, pm1_cartesian) lng_p1_rad = atan2(p1_cartesian[1], p1_cartesian[0]) px_retrans_rad = cartesian2rad(*x_rotate(rotation_rad, px_cartesian)) return min(temp_distance, distance_to_point_on_equator(px_retrans_rad[0], px_retrans_rad[1], max(min(px_retrans_rad[0], lng_p1_rad), 0)))
def compute_min_distance(lng_rad, lat_rad, p0_lng, p0_lat, pm1_lng, pm1_lat, p1_lng, p1_lat)
:param lng_rad: lng of px in radians :param lat_rad: lat of px in radians :param p0_lng: lng of p0 in radians :param p0_lat: lat of p0 in radians :param pm1_lng: lng of pm1 in radians :param pm1_lat: lat of pm1 in radians :param p1_lng: lng of p1 in radians :param p1_lat: lat of p1 in radians :return: shortest distance between pX and the polygon section (pm1---p0---p1) in km
3.569138
3.493098
1.021768
with self._connection.get_response(stream_id) as response: if response.status == 200: return 'Success' else: raw_data = response.read().decode('utf-8') data = json.loads(raw_data) if response.status == 410: return data['reason'], data['timestamp'] else: return data['reason']
def get_notification_result(self, stream_id)
Get result for the specified stream. The function returns: 'Success' or 'failure reason' or ('Unregistered', timestamp)
3.121985
2.592407
1.20428
notification_iterator = iter(notifications) next_notification = next(notification_iterator, None) # Make sure we're connected to APNs, so that we receive and process the server's SETTINGS # frame before starting to send notifications. self.connect() results = {} open_streams = collections.deque() # Loop on the tokens, sending as many requests as possible concurrently to APNs. # When reaching the maximum concurrent streams limit, wait for a response before sending # another request. while len(open_streams) > 0 or next_notification is not None: # Update the max_concurrent_streams on every iteration since a SETTINGS frame can be # sent by the server at any time. self.update_max_concurrent_streams() if self.should_send_notification(next_notification, open_streams): logger.info('Sending to token %s', next_notification.token) stream_id = self.send_notification_async(next_notification.token, next_notification.payload, topic, priority, expiration, collapse_id) open_streams.append(RequestStream(stream_id, next_notification.token)) next_notification = next(notification_iterator, None) if next_notification is None: # No tokens remaining. Proceed to get results for pending requests. logger.info('Finished sending all tokens, waiting for pending requests.') else: # We have at least one request waiting for response (otherwise we would have either # sent new requests or exited the while loop.) Wait for the first outstanding stream # to return a response. pending_stream = open_streams.popleft() result = self.get_notification_result(pending_stream.stream_id) logger.info('Got response for %s: %s', pending_stream.token, result) results[pending_stream.token] = result return results
def send_notification_batch(self, notifications, topic=None, priority=NotificationPriority.Immediate, expiration=None, collapse_id=None)
Send a notification to a list of tokens in batch. Instead of sending a synchronous request for each token, send multiple requests concurrently. This is done on the same connection, using HTTP/2 streams (one request per stream). APNs allows many streams simultaneously, but the number of streams can vary depending on server load. This method reads the SETTINGS frame sent by the server to figure out the maximum number of concurrent streams. Typically, APNs reports a maximum of 500. The function returns a dictionary mapping each token to its result. The result is "Success" if the token was sent successfully, or the string returned by APNs in the 'reason' field of the response, if the token generated an error.
4.150472
3.686945
1.125721
retries = 0 while retries < MAX_CONNECTION_RETRIES: try: self._connection.connect() logger.info('Connected to APNs') return except Exception: # pylint: disable=broad-except # close the connection, otherwise next connect() call would do nothing self._connection.close() retries += 1 logger.exception('Failed connecting to APNs (attempt %s of %s)', retries, MAX_CONNECTION_RETRIES) raise ConnectionFailed()
def connect(self)
Establish a connection to APNs. If already connected, the function does nothing. If the connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
4.139919
3.153738
1.312702
''' Get raw data as integer, based on offset and size ''' offset = int(source['offset']) size = int(source['size']) return int(''.join(['1' if digit else '0' for digit in bitarray[offset:offset + size]]), 2)
def _get_raw(source, bitarray)
Get raw data as integer, based on offset and size
4.533279
3.21217
1.411282
''' put value into bit array ''' offset = int(target['offset']) size = int(target['size']) for digit in range(size): bitarray[offset+digit] = (raw_value >> (size-digit-1)) & 0x01 != 0 return bitarray
def _set_raw(target, raw_value, bitarray)
put value into bit array
3.848386
3.639828
1.057299
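The two bit-field helpers above (_get_raw and _set_raw) are easier to follow as free functions over explicit offset/size arguments. The version below is a simplified sketch that drops the XML-derived source/target dictionaries of the original methods.

def get_raw(offset, size, bitarray):
    # read `size` bits starting at `offset` as an unsigned integer (MSB first)
    return int(''.join('1' if bit else '0' for bit in bitarray[offset:offset + size]), 2)

def set_raw(offset, size, raw_value, bitarray):
    # write `raw_value` into `size` bits starting at `offset` (MSB first)
    for digit in range(size):
        bitarray[offset + digit] = (raw_value >> (size - digit - 1)) & 0x01 != 0
    return bitarray

bits = [False] * 8
set_raw(2, 4, 0b1011, bits)
print(get_raw(2, 4, bits))  # 11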
''' Get value, based on the data in XML ''' raw_value = self._get_raw(source, bitarray) rng = source.find('range') rng_min = float(rng.find('min').text) rng_max = float(rng.find('max').text) scl = source.find('scale') scl_min = float(scl.find('min').text) scl_max = float(scl.find('max').text) return { source['shortcut']: { 'description': source.get('description'), 'unit': source['unit'], 'value': (scl_max - scl_min) / (rng_max - rng_min) * (raw_value - rng_min) + scl_min, 'raw_value': raw_value, } }
def _get_value(self, source, bitarray)
Get value, based on the data in XML
2.758117
2.437646
1.131467
''' Get enum value, based on the data in XML ''' raw_value = self._get_raw(source, bitarray) # Find value description. value_desc = source.find('item', {'value': str(raw_value)}) or self._get_rangeitem(source, raw_value) return { source['shortcut']: { 'description': source.get('description'), 'unit': source.get('unit', ''), 'value': value_desc['description'].format(value=raw_value), 'raw_value': raw_value, } }
def _get_enum(self, source, bitarray)
Get enum value, based on the data in XML
5.627839
4.756984
1.183069
''' Get boolean value, based on the data in XML ''' raw_value = self._get_raw(source, bitarray) return { source['shortcut']: { 'description': source.get('description'), 'unit': source.get('unit', ''), 'value': True if raw_value else False, 'raw_value': raw_value, } }
def _get_boolean(self, source, bitarray)
Get boolean value, based on the data in XML
4.953194
3.839688
1.289999
''' set given numeric value to target field in bitarray ''' # derive raw value rng = target.find('range') rng_min = float(rng.find('min').text) rng_max = float(rng.find('max').text) scl = target.find('scale') scl_min = float(scl.find('min').text) scl_max = float(scl.find('max').text) raw_value = (value - scl_min) * (rng_max - rng_min) / (scl_max - scl_min) + rng_min # store value in bitfield return self._set_raw(target, int(raw_value), bitarray)
def _set_value(self, target, value, bitarray)
set given numeric value to target field in bitarray
2.685211
2.426896
1.106438
''' set given enum value (by string or integer value) to target field in bitarray ''' # derive raw value if isinstance(value, int): # check whether this value exists if target.find('item', {'value': value}) or self._get_rangeitem(target, value): # set integer values directly raw_value = value else: raise ValueError('Enum value "%s" not found in EEP.' % (value)) else: value_item = target.find('item', {'description': value}) if value_item is None: raise ValueError('Enum description for value "%s" not found in EEP.' % (value)) raw_value = int(value_item['value']) return self._set_raw(target, raw_value, bitarray)
def _set_enum(self, target, value, bitarray)
set given enum value (by string or integer value) to target field in bitarray
4.920236
4.143025
1.187595
''' Find profile and data description, matching RORG, FUNC and TYPE ''' if not self.init_ok: self.logger.warn('EEP.xml not loaded!') return None if eep_rorg not in self.telegrams.keys(): self.logger.warn('Cannot find rorg in EEP!') return None if rorg_func not in self.telegrams[eep_rorg].keys(): self.logger.warn('Cannot find func in EEP!') return None if rorg_type not in self.telegrams[eep_rorg][rorg_func].keys(): self.logger.warn('Cannot find type in EEP!') return None profile = self.telegrams[eep_rorg][rorg_func][rorg_type] if command: # multiple commands can be defined, with the command id always in same location (per RORG-FUNC-TYPE). eep_command = profile.find('command', recursive=False) # If commands are not set in EEP, or command is None, # get the first data as a "best guess". if not eep_command: return profile.find('data', recursive=False) # If eep_command is defined, so should be data.command return profile.find('data', {'command': str(command)}, recursive=False) # extract data description # the direction tag is optional if direction is None: return profile.find('data', recursive=False) return profile.find('data', {'direction': direction}, recursive=False)
def find_profile(self, bitarray, eep_rorg, rorg_func, rorg_type, direction=None, command=None)
Find profile and data description, matching RORG, FUNC and TYPE
3.872162
3.582612
1.080821
''' Get keys and values from bitarray ''' if not self.init_ok or profile is None: return [], {} output = OrderedDict({}) for source in profile.contents: if not source.name: continue if source.name == 'value': output.update(self._get_value(source, bitarray)) if source.name == 'enum': output.update(self._get_enum(source, bitarray)) if source.name == 'status': output.update(self._get_boolean(source, status)) return output.keys(), output
def get_values(self, profile, bitarray, status)
Get keys and values from bitarray
3.592182
3.544607
1.013422
''' Update data based on data contained in properties ''' if not self.init_ok or profile is None: return data, status for shortcut, value in properties.items(): # find the given property from EEP target = profile.find(shortcut=shortcut) if not target: # TODO: Should we raise an error? self.logger.warning('Cannot find data description for shortcut %s', shortcut) continue # update bit_data if target.name == 'value': data = self._set_value(target, value, data) if target.name == 'enum': data = self._set_enum(target, value, data) if target.name == 'status': status = self._set_boolean(target, value, status) return data, status
def set_values(self, profile, data, status, properties)
Update data based on data contained in properties
4.847838
4.388357
1.104705
''' Combine list of integer values to one big integer ''' output = 0x00 for i, value in enumerate(reversed(data)): output |= (value << i * 8) return output
def combine_hex(data)
Combine list of integer values to one big integer
5.241289
4.340714
1.207472
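combine_hex is short enough to demonstrate directly; the example below shows how a byte list collapses into one integer (first byte most significant).

def combine_hex(data):
    # fold a list of byte values into one integer, big-endian
    output = 0x00
    for i, value in enumerate(reversed(data)):
        output |= (value << i * 8)
    return output

print(hex(combine_hex([0x12, 0x34, 0x56])))  # 0x123456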
''' Convert data (list of integers, bytearray or integer) to bitarray ''' if isinstance(data, list) or isinstance(data, bytearray): data = combine_hex(data) return [True if digit == '1' else False for digit in bin(data)[2:].zfill(width)]
def to_bitarray(data, width=8)
Convert data (list of integers, bytearray or integer) to bitarray
4.20877
3.5065
1.200277
''' Convert list of integers to a hex string, separated by ":" ''' if isinstance(data, int): return '%02X' % data return ':'.join([('%02X' % o) for o in data])
def to_hex_string(data)
Convert list of integers to a hex string, separated by ":"
4.370126
3.129892
1.396255
''' Parses message from buffer. returns: - PARSE_RESULT - remaining buffer - Packet -object (if message was valid, else None) ''' # If the buffer doesn't contain 0x55 (start char) # the message isn't needed -> ignore if 0x55 not in buf: return PARSE_RESULT.INCOMPLETE, [], None # Valid buffer starts from 0x55 # Convert to list, as index -method isn't defined for bytearray buf = [ord(x) if not isinstance(x, int) else x for x in buf[list(buf).index(0x55):]] try: data_len = (buf[1] << 8) | buf[2] opt_len = buf[3] except IndexError: # If the fields don't exist, message is incomplete return PARSE_RESULT.INCOMPLETE, buf, None # Header: 6 bytes, data, optional data and data checksum msg_len = 6 + data_len + opt_len + 1 if len(buf) < msg_len: # If buffer isn't long enough, the message is incomplete return PARSE_RESULT.INCOMPLETE, buf, None msg = buf[0:msg_len] buf = buf[msg_len:] packet_type = msg[4] data = msg[6:6 + data_len] opt_data = msg[6 + data_len:6 + data_len + opt_len] # Check CRCs for header and data if msg[5] != crc8.calc(msg[1:5]): # Fail if doesn't match message Packet.logger.error('Header CRC error!') # Return CRC_MISMATCH return PARSE_RESULT.CRC_MISMATCH, buf, None if msg[6 + data_len + opt_len] != crc8.calc(msg[6:6 + data_len + opt_len]): # Fail if doesn't match message Packet.logger.error('Data CRC error!') # Return CRC_MISMATCH return PARSE_RESULT.CRC_MISMATCH, buf, None # If we got this far, everything went ok (?) if packet_type == PACKET.RADIO_ERP1: # Need to handle UTE Teach-in here, as it's a separate packet type... if data[0] == RORG.UTE: packet = UTETeachInPacket(packet_type, data, opt_data) else: packet = RadioPacket(packet_type, data, opt_data) elif packet_type == PACKET.RESPONSE: packet = ResponsePacket(packet_type, data, opt_data) elif packet_type == PACKET.EVENT: packet = EventPacket(packet_type, data, opt_data) else: packet = Packet(packet_type, data, opt_data) return PARSE_RESULT.OK, buf, packet
def parse_msg(buf)
Parses message from buffer. returns: - PARSE_RESULT - remaining buffer - Packet -object (if message was valid, else None)
3.357226
2.908709
1.154198
''' Creates an packet ready for sending. Uses rorg, rorg_func and rorg_type to determine the values set based on EEP. Additional arguments (**kwargs) are used for setting the values. Currently only supports: - PACKET.RADIO_ERP1 - RORGs RPS, BS1, BS4, VLD. TODO: - Require sender to be set? Would force the "correct" sender to be set. - Do we need to set telegram control bits? Might be useful for acting as a repeater? ''' if packet_type != PACKET.RADIO_ERP1: # At least for now, only support PACKET.RADIO_ERP1. raise ValueError('Packet type not supported by this function.') if rorg not in [RORG.RPS, RORG.BS1, RORG.BS4, RORG.VLD]: # At least for now, only support these RORGS. raise ValueError('RORG not supported by this function.') if destination is None: Packet.logger.warning('Replacing destination with broadcast address.') destination = [0xFF, 0xFF, 0xFF, 0xFF] # TODO: Should use the correct Base ID as default. # Might want to change the sender to be an offset from the actual address? if sender is None: Packet.logger.warning('Replacing sender with default address.') sender = [0xDE, 0xAD, 0xBE, 0xEF] if not isinstance(destination, list) or len(destination) != 4: raise ValueError('Destination must a list containing 4 (numeric) values.') if not isinstance(sender, list) or len(sender) != 4: raise ValueError('Sender must a list containing 4 (numeric) values.') packet = Packet(packet_type, data=[], optional=[]) packet.rorg = rorg packet.data = [packet.rorg] # Select EEP at this point, so we know how many bits we're dealing with (for VLD). packet.select_eep(rorg_func, rorg_type, direction, command) # Initialize data depending on the profile. if rorg in [RORG.RPS, RORG.BS1]: packet.data.extend([0]) elif rorg == RORG.BS4: packet.data.extend([0, 0, 0, 0]) else: packet.data.extend([0] * int(packet._profile.get('bits', '1'))) packet.data.extend(sender) packet.data.extend([0]) # Always use sub-telegram 3, maximum dbm (as per spec, when sending), # and no security (security not supported as per EnOcean Serial Protocol). packet.optional = [3] + destination + [0xFF] + [0] if command: # Set CMD to command, if applicable.. Helps with VLD. kwargs['CMD'] = command packet.set_eep(kwargs) if rorg in [RORG.BS1, RORG.BS4] and not learn: if rorg == RORG.BS1: packet.data[1] |= (1 << 3) if rorg == RORG.BS4: packet.data[4] |= (1 << 3) packet.data[-1] = packet.status # Parse the built packet, so it corresponds to the received packages # For example, stuff like RadioPacket.learn should be set. packet = Packet.parse_msg(packet.build())[2] packet.rorg = rorg packet.parse_eep(rorg_func, rorg_type, direction, command) return packet
def create(packet_type, rorg, rorg_func, rorg_type, direction=None, command=None, destination=None, sender=None, learn=False, **kwargs)
Creates a packet ready for sending. Uses rorg, rorg_func and rorg_type to determine the values set based on EEP. Additional arguments (**kwargs) are used for setting the values. Currently only supports: - PACKET.RADIO_ERP1 - RORGs RPS, BS1, BS4, VLD. TODO: - Require sender to be set? Would force the "correct" sender to be set. - Do we need to set telegram control bits? Might be useful for acting as a repeater?
4.897599
3.348197
1.462757
''' Parse data from Packet ''' # Parse status from messages if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]: self.status = self.data[-1] if self.rorg == RORG.VLD: self.status = self.optional[-1] if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]: # These message types should have repeater count in the last four bits of status. self.repeater_count = enocean.utils.from_bitarray(self._bit_status[4:]) return self.parsed
def parse(self)
Parse data from Packet
7.416579
7.203302
1.029608
''' Set EEP based on FUNC and TYPE ''' # set EEP profile self.rorg_func = rorg_func self.rorg_type = rorg_type self._profile = self.eep.find_profile(self._bit_data, self.rorg, rorg_func, rorg_type, direction, command) return self._profile is not None
def select_eep(self, rorg_func, rorg_type, direction=None, command=None)
Set EEP based on FUNC and TYPE
4.425338
4.138023
1.069433
''' Parse EEP based on FUNC and TYPE ''' # set EEP profile, if demanded if rorg_func is not None and rorg_type is not None: self.select_eep(rorg_func, rorg_type, direction, command) # parse data provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status) self.parsed.update(values) return list(provides)
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None)
Parse EEP based on FUNC and TYPE
6.119379
5.794635
1.056042
''' Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP. ''' self._bit_data, self._bit_status = self.eep.set_values(self._profile, self._bit_data, self._bit_status, data)
def set_eep(self, data)
Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP.
7.771337
4.154981
1.870367
''' Build Packet for sending to EnOcean controller ''' data_length = len(self.data) ords = [0x55, (data_length >> 8) & 0xFF, data_length & 0xFF, len(self.optional), int(self.packet_type)] ords.append(crc8.calc(ords[1:5])) ords.extend(self.data) ords.extend(self.optional) ords.append(crc8.calc(ords[6:])) return ords
def build(self)
Build Packet for sending to EnOcean controller
4.224796
3.361554
1.256798
''' Get message from send queue, if one exists ''' try: packet = self.transmit.get(block=False) self.logger.info('Sending packet') self.logger.debug(packet) return packet except queue.Empty: pass return None
def _get_from_send_queue(self)
Get message from send queue, if one exists
4.889844
4.213329
1.160566
''' Parses messages and puts them to receive queue ''' # Loop while we get new messages while True: status, self._buffer, packet = Packet.parse_msg(self._buffer) # If message is incomplete -> break the loop if status == PARSE_RESULT.INCOMPLETE: return status # If message is OK, add it to receive queue or send to the callback method if status == PARSE_RESULT.OK and packet: packet.received = datetime.datetime.now() if isinstance(packet, UTETeachInPacket) and self.teach_in: response_packet = packet.create_response_packet(self.base_id) self.logger.info('Sending response to UTE teach-in.') self.send(response_packet) if self.__callback is None: self.receive.put(packet) else: self.__callback(packet) self.logger.debug(packet)
def parse(self)
Parses messages and puts them to receive queue
5.873399
5.313253
1.105424
''' Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID. ''' # If base id is already set, return it. if self._base_id is not None: return self._base_id # Send COMMON_COMMAND 0x08, CO_RD_IDBASE request to the module self.send(Packet(PACKET.COMMON_COMMAND, data=[0x08])) # Loop over 10 times, to make sure we catch the response. # Thanks to timeout, shouldn't take more than a second. # Unfortunately, all other messages received during this time are ignored. for i in range(0, 10): try: packet = self.receive.get(block=True, timeout=0.1) # We're only interested in responses to the request in question. if packet.packet_type == PACKET.RESPONSE and packet.response == RETURN_CODE.OK and len(packet.response_data) == 4: # Base ID is set in the response data. self._base_id = packet.response_data # Put packet back to the Queue, so the user can also react to it if required... self.receive.put(packet) break # Put other packets back to the Queue. self.receive.put(packet) except queue.Empty: continue # Return the current Base ID (might be None). return self._base_id
def base_id(self)
Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID.
5.776412
4.925451
1.172768
''' Wrapper to implement simple timing of tests. Allows running multiple rounds to calculate average time. Limit (in milliseconds) can be set to assert, if (average) duration is too high. ''' def decorator(method): @functools.wraps(method) def f(): if rounds == 1: start = time.time() method() duration = time.time() - start else: start = time.time() for i in range(rounds): method() duration = (time.time() - start) / rounds # Use milliseconds for duration counter duration = duration * 1e3 print('Test "%s.%s" took %.06f ms.' % (method.__module__, method.__name__, duration)) if limit is not None: assert limit > duration, 'Timing failure: %.06f > %.06f' % (duration, limit) # Run tests with timings, only if WITH_TIMINGS environment variable is set. # This is because tests with multiple rounds can take long to process. if environ.get('WITH_TIMINGS', None) == '1': return f return method return decorator
def timing(rounds=1, limit=None)
Wrapper to implement simple timing of tests. Allows running multiple rounds to calculate an average time. A limit (in milliseconds) can be set to assert that the (average) duration is not too high.
4.401547
2.949149
1.492481
sys.path.append(os.getcwd()) worker_settings = import_string(worker_settings) logging.config.dictConfig(default_log_config(verbose)) if check: exit(check_health(worker_settings)) else: kwargs = {} if burst is None else {'burst': burst} if watch: loop = asyncio.get_event_loop() loop.run_until_complete(watch_reload(watch, worker_settings, loop)) else: run_worker(worker_settings, **kwargs)
def cli(*, worker_settings, burst, check, watch, verbose)
Job queues in python with asyncio and redis. CLI to run the arq worker.
2.903879
2.920213
0.994407
dt = previous_dt + timedelta(seconds=1) if isinstance(weekday, str): weekday = weekdays.index(weekday.lower()) options = dict( month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond ) while True: next_dt = _get_next_dt(dt, options) # print(dt, next_dt) if next_dt is None: return dt dt = next_dt
def next_cron( previous_dt: datetime, *, month: Union[None, set, int] = None, day: Union[None, set, int] = None, weekday: Union[None, set, int, str] = None, hour: Union[None, set, int] = None, minute: Union[None, set, int] = None, second: Union[None, set, int] = 0, microsecond: int = 123_456, )
Find the next datetime matching the given parameters.
2.551655
2.407401
1.059921
if isinstance(coroutine, str): name = name or 'cron:' + coroutine coroutine = import_string(coroutine) assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function' timeout = to_seconds(timeout) keep_result = to_seconds(keep_result) return CronJob( name or 'cron:' + coroutine.__qualname__, coroutine, month, day, weekday, hour, minute, second, microsecond, run_at_startup, unique, timeout, keep_result, max_tries, )
def cron( coroutine: Union[str, Callable], *, name: Optional[str] = None, month: Union[None, set, int] = None, day: Union[None, set, int] = None, weekday: Union[None, set, int, str] = None, hour: Union[None, set, int] = None, minute: Union[None, set, int] = None, second: Union[None, set, int] = 0, microsecond: int = 123_456, run_at_startup: bool = False, unique: bool = True, timeout: Optional[SecondsTimedelta] = None, keep_result: Optional[float] = 0, max_tries: Optional[int] = 1, ) -> CronJob
Create a cron job, eg. it should be executed at specific times. Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the job will only be run once even if multiple workers are running. :param coroutine: coroutine function to run :param name: name of the job, if None, the name of the coroutine is used :param month: month(s) to run the job on, 1 - 12 :param day: day(s) to run the job on, 1 - 31 :param weekday: week day(s) to run the job on, 0 - 6 or mon - sun :param hour: hour(s) to run the job on, 0 - 23 :param minute: minute(s) to run the job on, 0 - 59 :param second: second(s) to run the job on, 0 - 59 :param microsecond: microsecond(s) to run the job on, defaults to 123456 as the world is busier at the top of a second, 0 - 1e6 :param run_at_startup: whether to run as worker starts :param unique: whether the job should be only be executed once at each time :param timeout: job timeout :param keep_result: how long to keep the result for :param max_tries: maximum number of tries for the job
2.047012
2.156977
0.949019
utcoffset = dt.utcoffset() ep = epoch if utcoffset is None else epoch_tz return as_int((dt - ep).total_seconds() * 1000)
def to_unix_ms(dt: datetime) -> int
convert a datetime to number of milliseconds since 1970 and calculate timezone offset
5.625235
4.783033
1.176081
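A standalone approximation of the to_unix_ms record for timezone-aware datetimes; `epoch_tz` and `as_int` from the original are replaced here by a plain UTC epoch and `round`, so treat this as a sketch rather than the library's implementation.

from datetime import datetime, timezone

def to_unix_ms(dt: datetime) -> int:
    # milliseconds since 1970-01-01 UTC for an aware datetime
    epoch_tz = datetime(1970, 1, 1, tzinfo=timezone.utc)
    return round((dt - epoch_tz).total_seconds() * 1000)

print(to_unix_ms(datetime(2020, 1, 1, tzinfo=timezone.utc)))  # 1577836800000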
if len(s) > length: s = s[: length - 1] + '…' return s
def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str
Truncate a string and add an ellipsis (three dots) to the end if it was too long :param s: string to possibly truncate :param length: length to truncate the string to
3.998482
4.110736
0.972692
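The truncate helper behaves as below; DEFAULT_CURTAIL is not defined in this record, so an arbitrary value is assumed for the sketch.

DEFAULT_CURTAIL = 80  # assumed default, not taken from the record above

def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str:
    # keep at most `length` characters, replacing the overflow with an ellipsis
    if len(s) > length:
        s = s[: length - 1] + '…'
    return s

print(truncate('abcdefghij', 5))  # 'abcd…'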
settings = settings or RedisSettings() addr = settings.host, settings.port try: pool = await aioredis.create_redis_pool( addr, db=settings.database, password=settings.password, timeout=settings.conn_timeout, encoding='utf8', commands_factory=ArqRedis, ) except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e: if _retry < settings.conn_retries: logger.warning( 'redis connection error %s:%s %s %s, %d retries remaining...', settings.host, settings.port, e.__class__.__name__, e, settings.conn_retries - _retry, ) await asyncio.sleep(settings.conn_retry_delay) else: raise else: if _retry > 0: logger.info('redis connection successful') return pool # recursively attempt to create the pool outside the except block to avoid # "During handling of the above exception..." madness return await create_pool(settings, _retry=_retry + 1)
async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis
Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails. Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance, thus allowing job enqueuing.
2.723453
2.802305
0.971862
job_id = _job_id or uuid4().hex job_key = job_key_prefix + job_id assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both" defer_by_ms = to_ms(_defer_by) expires_ms = to_ms(_expires) with await self as conn: pipe = conn.pipeline() pipe.unwatch() pipe.watch(job_key) job_exists = pipe.exists(job_key) await pipe.execute() if await job_exists: return enqueue_time_ms = timestamp_ms() if _defer_until is not None: score = to_unix_ms(_defer_until) elif defer_by_ms: score = enqueue_time_ms + defer_by_ms else: score = enqueue_time_ms expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms) tr = conn.multi_exec() tr.psetex(job_key, expires_ms, job) tr.zadd(queue_name, score, job_id) try: await tr.execute() except MultiExecError: # job got enqueued since we checked 'job_exists' return return Job(job_id, self)
async def enqueue_job( self, function: str, *args: Any, _job_id: Optional[str] = None, _defer_until: Optional[datetime] = None, _defer_by: Union[None, int, float, timedelta] = None, _expires: Union[None, int, float, timedelta] = None, _job_try: Optional[int] = None, **kwargs: Any, ) -> Optional[Job]
Enqueue a job. :param function: Name of the function to call :param args: args to pass to the function :param _job_id: ID of the job, can be used to enforce job uniqueness :param _defer_until: datetime at which to run the job :param _defer_by: duration to wait before running the job :param _expires: if the job still hasn't started after this duration, do not run it :param _job_try: useful when re-enqueueing jobs within a job :param kwargs: any keyword arguments to pass to the function :return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
3.092599
3.218067
0.961011
keys = await self.keys(result_key_prefix + '*') results = await asyncio.gather(*[self._get_job_result(k) for k in keys]) return sorted(results, key=attrgetter('enqueue_time'))
async def all_job_results(self) -> List[JobResult]
Get results for all jobs in redis.
3.851994
3.520739
1.094087
redis = await create_pool(RedisSettings()) # no id, random id will be generated job1 = await redis.enqueue_job('the_task') print(job1) # random id again, again the job will be enqueued and a job will be returned job2 = await redis.enqueue_job('the_task') print(job2) # custom job id, job will be enqueued job3 = await redis.enqueue_job('the_task', _job_id='foobar') print(job3) # same custom job id, job will not be enqueued and enqueue_job will return None job4 = await redis.enqueue_job('the_task', _job_id='foobar') print(job4)
async def main()
> <arq job 99edfef86ccf4145b2f64ee160fa3297>
4.260359
4.172196
1.021131
if isinstance(coroutine, Function): return coroutine if isinstance(coroutine, str): name = name or coroutine coroutine = import_string(coroutine) assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function' timeout = to_seconds(timeout) keep_result = to_seconds(keep_result) return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries)
def func( coroutine: Union[str, Function, Callable], *, name: Optional[str] = None, keep_result: Optional[SecondsTimedelta] = None, timeout: Optional[SecondsTimedelta] = None, max_tries: Optional[int] = None, ) -> Function
Wrapper for a job function which lets you configure more settings. :param coroutine: coroutine function to call, can be a string to import :param name: name for function, if None, ``coroutine.__qualname__`` is used :param keep_result: duration to keep the result for, if 0 the result is not kept :param timeout: maximum time the job should take :param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying
2.214146
2.742604
0.807315
cls_kwargs = get_kwargs(settings_cls) loop = asyncio.get_event_loop() return loop.run_until_complete(async_check_health(cls_kwargs.get('redis_settings')))
def check_health(settings_cls) -> int
Run a health check on the worker and return the appropriate exit code. :return: 0 if successful, 1 if not
4.052241
4.128402
0.981552
self.main_task = self.loop.create_task(self.main()) try: self.loop.run_until_complete(self.main_task) except asyncio.CancelledError: # happens on shutdown, fine pass finally: self.loop.run_until_complete(self.close())
def run(self) -> None
Sync function to run the worker, finally closes worker connections.
3.162175
2.635143
1.200001
self.main_task = self.loop.create_task(self.main()) await self.main_task
async def async_run(self) -> None
Asynchronously run the worker, does not close connections. Useful when testing.
4.558376
3.501144
1.301968
await self.async_run() if self.jobs_failed: failed_job_results = [r for r in await self.pool.all_job_results() if not r.success] raise FailedJobs(self.jobs_failed, failed_job_results) else: return self.jobs_complete
async def run_check(self) -> int
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs` if any jobs have failed. :return: number of completed jobs
4.748358
3.326605
1.427389
redis = await create_pool(RedisSettings()) job = await redis.enqueue_job('the_task') # get the job's id print(job.job_id) # get information about the job, will include results if the job has finished, but # doesn't await the job's result debug(await job.info()) # get the Job's status print(await job.status()) # poll redis for the job result, if the job raised an exception, # it will be raised here # (You'll need the worker running at the same time to get a result here) print(await job.result(timeout=5))
async def main()
> 68362958a244465b9be909db4b7b5ab4 (or whatever)
8.93618
9.043807
0.988099
async for delay in poll(pole_delay):
    info = await self.result_info()
    if info:
        result = info.result
        if info.success:
            return result
        else:
            raise result
    if timeout is not None and delay > timeout:
        raise asyncio.TimeoutError()
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any
Get the result of the job, including waiting if it's not yet available. If the job raised an exception, it will be raised here. :param timeout: maximum time to wait for the job result before raising ``TimeoutError``; if ``None``, will wait forever :param pole_delay: how often to poll redis for the job result
4.079769
4.72957
0.862609
info = await self.result_info()
if not info:
    v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
    if v:
        info = unpickle_job(v)
if info:
    info.score = await self._redis.zscore(queue_name, self.job_id)
return info
async def info(self) -> Optional[JobDef]
All information on a job, including its result if it's available, does not wait for the result.
4.286027
4.054005
1.057233
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
    return unpickle_result(v)
async def result_info(self) -> Optional[JobResult]
Information about the job result if available, does not wait for the result. Does not raise an exception even if the job raised one.
5.82498
5.575634
1.044721
if await self._redis.exists(result_key_prefix + self.job_id):
    return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
    return JobStatus.in_progress
else:
    score = await self._redis.zscore(queue_name, self.job_id)
    if not score:
        return JobStatus.not_found
    return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
async def status(self) -> JobStatus
Status of the job.
3.015434
2.981246
1.011468
for k in self._NAME_KEYS:
    if self._raw.get(k):
        return self._raw[k]
    if "profile" in self._raw and self._raw["profile"].get(k):
        return self._raw["profile"][k]
return self._raw["name"]
def display_name(self)
Find the most appropriate display name for a user: look for a "display_name", then a "real_name", and finally fall back to the always-present "name".
3.476006
2.9747
1.168523
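A standalone sketch (an assumption, not the project's class) of the fallback lookup described above: prefer "display_name", then "real_name", checking both the top level and the "profile" sub-dict, before falling back to the always-present "name".

_NAME_KEYS = ("display_name", "real_name")


def display_name(raw):
    # try each preferred key at the top level, then inside "profile"
    for k in _NAME_KEYS:
        if raw.get(k):
            return raw[k]
        if "profile" in raw and raw["profile"].get(k):
            return raw["profile"][k]
    return raw["name"]


user = {"name": "jdoe", "profile": {"real_name": "Jane Doe"}}
print(display_name(user))  # -> "Jane Doe"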
if "profile" in self._raw: email = self._raw["profile"].get("email") elif "bot_url" in self._raw: email = self._raw["bot_url"] else: email = None if not email: logging.debug("No email found for %s", self._raw.get("name")) return email
def email(self)
Shortcut property for finding the e-mail address or bot URL.
3.222909
2.718072
1.185734
if "profile" not in self._raw: return profile = self._raw["profile"] if (pixel_size): img_key = "image_%s" % pixel_size if img_key in profile: return profile[img_key] return profile[self._DEFAULT_IMAGE_KEY]
def image_url(self, pixel_size=None)
Get the URL for the user icon in the desired pixel size, if it exists. If no size is supplied, give the URL for the full-size image.
3.807671
3.69002
1.031883
process_markdown = ("fields" in self._raw.get("mrkdwn_in", []))
fields = self._raw.get("fields", [])
if fields:
    logging.debug("Rendering with markdown %s for %s", process_markdown, fields)
return [
    {
        "title": e["title"],
        "short": e["short"],
        "value": self._formatter.render_text(e["value"], process_markdown),
    }
    for e in fields
]
def fields(self)
Fetch the "fields" list, and process the text within each field, including markdown processing if the message indicates that the fields contain markdown. Only present on attachments, not files--this abstraction isn't 100% awesome.'
5.75317
4.772088
1.205588
dm_data = self._read_from_json("dms.json")
dms = dm_data.values()
all_dms_users = []
for dm in dms:
    # check whether messages actually exist for this dm
    if dm["id"] not in self._EMPTY_DMS:
        # try/except handles users from shared workspaces who are not in the current workspace
        try:
            dm_members = {"id": dm["id"], "users": [self.__USER_DATA[m] for m in dm["members"]]}
            all_dms_users.append(dm_members)
        except KeyError:
            dm_members = None
return all_dms_users
def compile_dm_users(self)
Gets the info for the members within the dm. Returns a list of all dms with the members that have ever existed. :rtype: [object] { id: <id> users: [<user_id>] }
6.06694
5.386243
1.126377
mpim_data = self._read_from_json("mpims.json")
mpims = [c for c in mpim_data.values()]
all_mpim_users = []
for mpim in mpims:
    mpim_members = {"name": mpim["name"], "users": [self.__USER_DATA[m] for m in mpim["members"]]}
    all_mpim_users.append(mpim_members)
return all_mpim_users
def compile_mpim_users(self)
Gets the info for the members within the multiple person instant message (mpim). Returns a list of all mpims with the members that have ever existed. :rtype: [object] { name: <name> users: [<user_id>] }
3.199047
2.94162
1.087512
chats = {}
empty_dms = []
formatter = SlackFormatter(self.__USER_DATA, data)

for name in names:
    # path to the dm directory that holds the json archive
    dir_path = os.path.join(self._PATH, name)
    messages = []
    # array of all days archived
    day_files = glob.glob(os.path.join(dir_path, "*.json"))
    # empty directories are skipped here
    if not day_files:
        if isDms:
            empty_dms.append(name)
        continue

    for day in sorted(day_files):
        with io.open(os.path.join(self._PATH, day), encoding="utf8") as f:
            # load all messages for this day
            day_messages = json.load(f)
            messages.extend([Message(formatter, d) for d in day_messages])

    chats[name] = messages

if isDms:
    self._EMPTY_DMS = empty_dms
return chats
def _create_messages(self, names, data, isDms=False)
Creates an object of arrays of messages from each json file specified by the names or ids. :param [str] names: names of each group of messages :param [object] data: array of objects detailing where to get the messages from in the directory structure :param bool isDms: whether the data is DM data, so the function can collect the empty DM directories and keep them in memory only :return: object of arrays of messages :rtype: object
4.488709
4.16864
1.07678
try:
    with io.open(os.path.join(self._PATH, file), encoding="utf8") as f:
        return {u["id"]: u for u in json.load(f)}
except IOError:
    return {}
def _read_from_json(self, file)
Reads the file specified from json and creates an object based on the id of each element :param str file: Path to file of json to read :return: object of data read from json file :rtype: object
3.88612
4.003098
0.970778
if PY_VERSION == 2:
    b = bytes(s)
elif PY_VERSION == 3:
    b = bytes(s, encoding)
else:
    raise ValueError("Is Python 4 out already?")
return b
def to_bytes(s, encoding="utf8")
Converts str s to bytes
6.014326
5.544107
1.084814
h = hashlib.sha1()
with io.open(filepath, 'rb') as f:
    for chunk in iter(lambda: f.read(h.block_size), b''):
        h.update(chunk)
h.update(extra)
return h.hexdigest()
def SHA1_file(filepath, extra=b'')
Returns hex digest of SHA1 hash of file at filepath :param str filepath: File to hash :param bytes extra: Extra content added to raw read of file before taking hash :return: hex digest of hash :rtype: str
1.854739
2.24954
0.824497
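A small standalone check (an assumption, not from the source): the chunked digest above is equivalent to hashing the whole file content at once, with `extra` appended after the file bytes, which is what makes `extra` useful for cache invalidation.

import hashlib
import io
import tempfile


def sha1_file(filepath, extra=b''):
    # same chunked pattern as the helper above
    h = hashlib.sha1()
    with io.open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(h.block_size), b''):
            h.update(chunk)
    h.update(extra)
    return h.hexdigest()


with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')

assert sha1_file(tmp.name, extra=b'v1') == hashlib.sha1(b'hello world' + b'v1').hexdigest()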
# Checks if file path is a directory
if os.path.isdir(filepath):
    path = os.path.abspath(filepath)
    print("Archive already extracted. Viewing from {}...".format(path))
    return path
# Checks if the filepath is a zipfile and continues to extract if it is;
# if not, it raises an error
elif not zipfile.is_zipfile(filepath):
    # Misuse of TypeError? :P
    raise TypeError("{} is not a zipfile".format(filepath))

archive_sha = SHA1_file(
    filepath=filepath,
    # Add version of slackviewer to hash as well so we can invalidate the cached copy
    # if there are new features added
    extra=to_bytes(slackviewer.__version__),
)

extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
if os.path.exists(extracted_path):
    print("{} already exists".format(extracted_path))
else:
    # Extract zip
    with zipfile.ZipFile(filepath) as zip:
        print("{} extracting to {}...".format(filepath, extracted_path))
        zip.extractall(path=extracted_path)
    print("{} extracted to {}".format(filepath, extracted_path))

    # Add additional file with archive info
    create_archive_info(filepath, extracted_path, archive_sha)

return extracted_path
def extract_archive(filepath)
Returns the path of the archive :param str filepath: Path to file to extract or read :return: path of the archive :rtype: str
4.448196
4.504834
0.987427
archive_info = {
    "sha1": archive_sha,
    "filename": os.path.split(filepath)[1],
}
with io.open(
    os.path.join(
        extracted_path,
        ".slackviewer_archive_info.json",
    ),
    'w+',
    encoding="utf-8"
) as f:
    s = json.dumps(archive_info, ensure_ascii=False)
    s = to_unicode(s)
    f.write(s)
def create_archive_info(filepath, extracted_path, archive_sha=None)
Saves archive info to a json file :param str filepath: Path to the archive file :param str extracted_path: Path to the directory the archive was extracted to :param str archive_sha: SHA string created when archive was extracted from zip
2.922874
3.330667
0.877564
extracted_path = extract_archive(archive_name)
base_filename = basename(archive_name)
(noext_filename, _) = splitext(base_filename)
# Typical extract name: "My Friends and Family Slack export Jul 21 2018 - Sep 06 2018"
# If that's not the format, we will just fall back to the extension-free filename.
(workspace_name, _) = noext_filename.split(" Slack export ", 1)
return {
    "readable_path": extracted_path,
    "basename": base_filename,
    "stripped_name": noext_filename,
    "workspace_name": workspace_name,
}
def get_export_info(archive_name)
Given a file or directory, extract it and return information that will be used in an export printout: the basename of the file, the name stripped of its extension, and our best guess (based on Slack's current naming convention) of the name of the workspace that this is an export of.
6.139805
5.204382
1.179738
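A standalone sketch (an assumption, not the project's code) of the filename parsing described above, using a made-up export filename that follows Slack's "<workspace> Slack export <dates>" convention.

from os.path import basename, splitext


def parse_export_name(archive_name):
    base_filename = basename(archive_name)
    noext_filename, _ = splitext(base_filename)
    # take everything before " Slack export "; if the marker is absent this
    # simply falls back to the extension-free filename
    workspace_name = noext_filename.split(" Slack export ", 1)[0]
    return base_filename, noext_filename, workspace_name


print(parse_export_name("/tmp/Acme Inc Slack export Jul 21 2018 - Sep 06 2018.zip"))
# -> ('Acme Inc Slack export Jul 21 2018 - Sep 06 2018.zip',
#     'Acme Inc Slack export Jul 21 2018 - Sep 06 2018', 'Acme Inc')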
assert action in ['reply', 'forward', 'bounce']

# get accounts
my_accounts = settings.get_accounts()
assert my_accounts, 'no accounts set!'

# extract list of addresses to check for my address
# X-Envelope-To and Envelope-To are used to store the recipient address
# if not included in other fields
# Process the headers in order of importance: if a mail was sent with
# account X, with account Y in e.g. CC or delivered-to, make sure that
# account X is the one selected and not account Y.
candidate_headers = settings.get("reply_account_header_priority")
for candidate_header in candidate_headers:
    candidate_addresses = getaddresses(mail.get_all(candidate_header, []))

    logging.debug('candidate addresses: %s', candidate_addresses)
    # pick the most important account that has an address in candidates
    # and use that account's realname and the address found here
    for account in my_accounts:
        for seen_name, seen_address in candidate_addresses:
            if account.matches_address(seen_address):
                if settings.get(action + '_force_realname'):
                    realname = account.realname
                else:
                    realname = seen_name
                if settings.get(action + '_force_address'):
                    address = str(account.address)
                else:
                    address = seen_address

                logging.debug('using realname: "%s"', realname)
                logging.debug('using address: %s', address)

                from_value = formataddr((realname, address))
                return from_value, account

# revert to default account if nothing found
account = my_accounts[0]
realname = account.realname
address = account.address
logging.debug('using realname: "%s"', realname)
logging.debug('using address: %s', address)

from_value = formataddr((realname, str(address)))
return from_value, account
def determine_sender(mail, action='reply')
Inspect a given mail to reply/forward/bounce and find the most appropriate account to act from and construct a suitable From-Header to use. :param mail: the email to inspect :type mail: `email.message.Message` :param action: intended use case: one of "reply", "forward" or "bounce" :type action: str
4.804405
4.589538
1.046817
if key in self.headers:
    value = self.headers[key][0]
else:
    value = fallback
return value
def get(self, key, fallback=None)
secure getter for header values that allows specifying a `fallback` return string (defaults to None). This returns the first matching value and doesn't raise KeyErrors
3.602553
3.113615
1.157032
if key in self.headers:
    value = self.headers[key]
else:
    value = fallback or []
return value
def get_all(self, key, fallback=None)
returns all header values for given key
4.376329
3.097329
1.412936
if key not in self.headers:
    self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
    self.modified_since_sent = True
def add(self, key, value)
add header value
5.015114
4.250797
1.179806
if isinstance(attachment, Attachment):
    self.attachments.append(attachment)
elif isinstance(attachment, str):
    path = os.path.expanduser(attachment)
    part = helper.mimewrap(path, filename, ctype)
    self.attachments.append(Attachment(part))
else:
    raise TypeError('attach accepts an Attachment or str')

if self.sent_time:
    self.modified_since_sent = True
def attach(self, attachment, filename=None, ctype=None)
attach a file :param attachment: File to attach, given as :class:`~alot.db.attachment.Attachment` object or path to a file. :type attachment: :class:`~alot.db.attachment.Attachment` or str :param filename: filename to use in content-disposition. Will be ignored if `path` matches multiple files :param ctype: force content-type to be used for this attachment :type ctype: str
4.266794
4.22805
1.009164
logging.debug('GoT: %s', tmp)

if self.sent_time:
    self.modified_since_sent = True

if only_body:
    self.body = tmp
else:
    m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)', tmp)
    assert m

    d = m.groupdict()
    headertext = d['h']
    self.body = d['b']

    # remove existing content
    if reset:
        self.headers = {}

    # go through multiline, utf-8 encoded headers
    # we decode the edited text ourselves here as
    # email.message_from_file can't deal with raw utf8 header values
    key = value = None
    for line in headertext.splitlines():
        if re.match('[a-zA-Z0-9_-]+:', line):  # new k/v pair
            if key and value:  # save old one from stack
                self.add(key, value)  # save
            key, value = line.strip().split(':', 1)  # parse new pair
            # strip spaces, otherwise we end up having " foo" as value
            # of "Subject: foo"
            value = value.strip()
        elif key and value:  # append new line without key prefix
            value += line
    if key and value:  # save last one if present
        self.add(key, value)

    # interpret 'Attach' pseudo header
    if 'Attach' in self:
        to_attach = []
        for line in self.get_all('Attach'):
            gpath = os.path.expanduser(line.strip())
            to_attach += [g for g in glob.glob(gpath) if os.path.isfile(g)]
        logging.debug('Attaching: %s', to_attach)
        for path in to_attach:
            self.attach(path)
        del self['Attach']
def parse_template(self, tmp, reset=False, only_body=False)
parses a template or user-edited string to fill this envelope. :param tmp: the string to parse. :type tmp: str :param reset: remove previous envelope content :type reset: bool
5.26217
5.162879
1.019232
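A small standalone demo (an assumption, not from the source) of the header/body split regex used above: headers are leading "Key: value" lines, and everything after the first blank line becomes the body.

import re

template = "Subject: hello\nTo: alice@example.com\n\nHi Alice,\nbody text here\n"

m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)', template)
d = m.groupdict()
print(repr(d['h']))  # 'Subject: hello\nTo: alice@example.com\n'
print(repr(d['b']))  # 'Hi Alice,\nbody text here\n'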
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
    lex.commenters = ''
return list(lex)
def split_commandline(s, comments=False, posix=True)
splits semi-colon separated commandlines
2.770414
2.869959
0.965315
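A minimal standalone demo (an assumption) of the ';'-splitting behaviour described above, re-using the same shlex configuration; note that whitespace around the separator is preserved in the resulting pieces.

import shlex


def split_commandline(s, comments=False, posix=True):
    # escape quotes and backslashes so shlex does not swallow them
    s = s.replace('\\', '\\\\').replace('\'', '\\\'').replace('\"', '\\\"')
    lex = shlex.shlex(s, posix=posix)
    lex.whitespace_split = True
    lex.whitespace = ';'
    if not comments:
        lex.commenters = ''
    return list(lex)


print(split_commandline("open 12; reply --all"))
# e.g. ['open 12', ' reply --all']  (the leading space is kept)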
string = string.replace('\r', '')

lines = list()
for line in string.split('\n'):
    tab_count = line.count('\t')

    if tab_count > 0:
        line_length = 0
        new_line = list()
        for i, chunk in enumerate(line.split('\t')):
            line_length += len(chunk)
            new_line.append(chunk)

            if i < tab_count:
                next_tab_stop_in = tab_width - (line_length % tab_width)
                new_line.append(' ' * next_tab_stop_in)
                line_length += next_tab_stop_in
        lines.append(''.join(new_line))
    else:
        lines.append(line)

return '\n'.join(lines)
def string_sanitize(string, tab_width=8)
r""" strips, and replaces non-printable characters :param tab_width: number of spaces to replace tabs with. Read from `globals.tabwidth` setting if `None` :type tab_width: int or `None` >>> string_sanitize(' foo\rbar ', 8) ' foobar ' >>> string_sanitize('foo\tbar', 8) 'foo bar' >>> string_sanitize('foo\t\tbar', 8) 'foo bar'
2.358161
2.546354
0.926093
if enc is None:
    enc = 'ascii'
try:
    string = str(string, enc, errors='replace')
except LookupError:  # malformed enc string
    string = string.decode('ascii', errors='replace')
except TypeError:  # already str
    pass
return string
def string_decode(string, enc='ascii')
safely decodes string to unicode bytestring, respecting `enc` as a hint. :param string: the string to decode :type string: str or unicode :param enc: a hint what encoding is used in string ('ascii', 'utf-8', ...) :type enc: str :returns: the unicode decoded input string :rtype: unicode
3.995983
3.911007
1.021727
if 1 < maxlen < len(string):
    string = string[:maxlen - 1] + u'…'
return string[:maxlen]
def shorten(string, maxlen)
shortens string if longer than maxlen, appending ellipsis
4.188044
4.066618
1.029859
# Build a list of authors by parsing authors_string. A deque is used
# to do popleft without performance penalties
authors = deque()

# If the author list is too long, use only the first part of each
# name (gmail style)
short_names = len(authors_string) > maxlength
for au in authors_string.split(", "):
    if short_names:
        author_as_list = au.split()
        if len(author_as_list) > 0:
            authors.append(author_as_list[0])
    else:
        authors.append(au)

# authors_chain will contain the list of author strings to be
# concatenated using commas for the final formatted authors_string.
authors_chain = deque()

if len(authors) == 0:
    return u''

# reserve space for the first author
first_au = shorten(authors.popleft(), maxlength)
remaining_length = maxlength - len(first_au)

# Try to add an ellipsis if there is no space to show more than 1 author
if authors and maxlength > 3 and remaining_length < 3:
    first_au = shorten(first_au, maxlength - 3)
    remaining_length += 3

# Try to add as many authors as possible. This takes into account
# that if any author will be hidden, an ellipsis should be added
while authors and remaining_length >= 3:
    au = authors.pop()
    if len(au) > 1 and (remaining_length == 3 or (authors and remaining_length < 7)):
        authors_chain.appendleft(u'\u2026')
        break
    else:
        if authors:
            # 5 = ellipsis + 2 x comma and space used as separators
            au_string = shorten(au, remaining_length - 5)
        else:
            # 2 = comma and space used as separator
            au_string = shorten(au, remaining_length - 2)
        remaining_length -= len(au_string) + 2
        authors_chain.appendleft(au_string)

# Add the first author to the list and concatenate the list
authors_chain.appendleft(first_au)
authorsstring = ', '.join(authors_chain)
return authorsstring
def shorten_author_string(authors_string, maxlength)
Parse a list of authors concatenated as a text string (comma separated) and smartly adjust them to maxlength. 1) If the complete list of sender names does not fit in maxlength, it tries to shorten names by using only the first part of each. 2) If the list is still too long, hide authors according to the following priority: - First author is always shown (if too long, it is shortened with an ellipsis) - If possible, the last author is also shown (if too long, uses an ellipsis) - If there are more than 2 authors in the thread, show as many of them as possible. More recent senders have higher priority. - If it is finally necessary to hide any author, an ellipsis is added between the first and the following authors.
4.67951
4.556448
1.027008
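A hedged usage sketch: assuming shorten() and shorten_author_string() are importable from alot.helper (as the surrounding helpers suggest, though the exact module path is an assumption), a long comma-separated sender list can be squeezed into a fixed column width.

# assumes the helper module path; adjust the import if it differs
from alot.helper import shorten_author_string

senders = "Alice Example, Bob Builder, Carol Jones, Dave Smith"
print(shorten_author_string(senders, 25))
# first sender kept, later senders shortened/elided to fit 25 characters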
ampm = d.strftime('%p').lower()
if len(ampm):
    hourfmt = '%I' + ampm
    hourminfmt = '%I:%M' + ampm
else:
    hourfmt = '%Hh'
    hourminfmt = '%H:%M'

now = datetime.now()
today = now.date()
if d.date() == today or d > now - timedelta(hours=6):
    delta = datetime.now() - d
    if delta.seconds < 60:
        string = 'just now'
    elif delta.seconds < 3600:
        string = '%dmin ago' % (delta.seconds // 60)
    elif delta.seconds < 6 * 3600:
        string = '%dh ago' % (delta.seconds // 3600)
    else:
        string = d.strftime(hourminfmt)
elif d.date() == today - timedelta(1):
    string = d.strftime('yest ' + hourfmt)
elif d.date() > today - timedelta(7):
    string = d.strftime('%a ' + hourfmt)
elif d.year != today.year:
    string = d.strftime('%b %Y')
else:
    string = d.strftime('%b %d')
return string_decode(string, 'UTF-8')
def pretty_datetime(d)
translates :class:`datetime` `d` to a "sup-style" human readable string. >>> now = datetime.now() >>> now.strftime('%c') 'Sat 31 Mar 2012 14:47:26 ' >>> pretty_datetime(now) u'just now' >>> pretty_datetime(now - timedelta(minutes=1)) u'1min ago' >>> pretty_datetime(now - timedelta(hours=5)) u'5h ago' >>> pretty_datetime(now - timedelta(hours=12)) u'02:54am' >>> pretty_datetime(now - timedelta(days=1)) u'yest 02pm' >>> pretty_datetime(now - timedelta(days=2)) u'Thu 02pm' >>> pretty_datetime(now - timedelta(days=7)) u'Mar 24' >>> pretty_datetime(now - timedelta(days=356)) u'Apr 2011'
2.489166
2.351342
1.058615
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
    stdin = stdin.encode(termenc)
try:
    logging.debug("Calling %s" % cmdlist)
    proc = subprocess.Popen(
        cmdlist,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
    out = b''
    err = e.strerror
    ret = e.errno
else:
    out, err = proc.communicate(stdin)
    ret = proc.returncode

out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret
def call_cmd(cmdlist, stdin=None)
get a shell command's output, error message and return value, returning immediately. .. warning:: This returns with the first screen content for interactive commands. :param cmdlist: shell command to call, already split into a list accepted by :meth:`subprocess.Popen` :type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str, bytes, or None :return: triple of stdout, stderr, return value of the shell command :rtype: str, str, int
2.790889
2.779062
1.004256
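A standalone sketch (an assumption, not the project's function) of the same synchronous pattern without the urwid/terminal-encoding plumbing: run a command, capture stdout/stderr and the return code, and degrade gracefully if the executable is missing.

import subprocess


def run(cmdlist, stdin=None):
    if isinstance(stdin, str):
        stdin = stdin.encode('utf-8')
    try:
        proc = subprocess.Popen(
            cmdlist,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE if stdin is not None else None)
    except OSError as e:
        # executable not found or not runnable
        return '', e.strerror, e.errno
    out, err = proc.communicate(stdin)
    return out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace'), proc.returncode


print(run(['echo', 'hello']))       # ('hello\n', '', 0)
print(run(['definitely-missing']))  # ('', <OS error message>, <errno>)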
termenc = urwid.util.detected_encoding
cmdlist = [s.encode(termenc) for s in cmdlist]

environment = os.environ.copy()
if env is not None:
    environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
try:
    proc = await asyncio.create_subprocess_exec(
        *cmdlist,
        env=environment,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        stdin=asyncio.subprocess.PIPE if stdin else None)
except OSError as e:
    return ('', str(e), 1)
out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
return (out.decode(termenc), err.decode(termenc), proc.returncode)
async def call_cmd_async(cmdlist, stdin=None, env=None)
Given a command, call that command asynchronously and return the output. This function only handles `OSError` when creating the subprocess; any other exceptions raised either during subprocess creation or while exchanging data with the subprocess are the caller's responsibility to handle. If such an `OSError` is caught, the returncode will be set to 1 and the error value will be set to the str() of the exception. :type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str :return: Tuple of stdout, stderr, returncode :rtype: tuple[str, str, int]
2.439001
2.454939
0.993508
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
    m = magic.open(magic.MAGIC_MIME_TYPE)
    m.load()
    magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
    # cf. issue #841
    magictype = magic.from_buffer(blob, mime=True) or magictype
else:
    raise Exception('Unknown magic API')

# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
    mimetype = magictype
return mimetype
def guess_mimetype(blob)
uses file magic to determine the mime-type of the given data blob. :param blob: file content as read by file.read() :type blob: data :returns: mime-type, falls back to 'application/octet-stream' :rtype: str
6.039634
6.166177
0.979478
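A hedged usage sketch: with the pip-installable python-magic bindings (https://github.com/ahupp/python-magic), the from_buffer() branch above boils down to the following; the fallback mirrors the helper's default, and the sample buffers and detected types are illustrative only.

import re

import magic  # assumes the ahupp/python-magic package is installed


def mimetype_of(blob):
    mimetype = 'application/octet-stream'
    detected = magic.from_buffer(blob, mime=True) or mimetype
    # only trust results that look like "type/subtype"
    if re.match(r'\w+/\w+', detected):
        mimetype = detected
    return mimetype


print(mimetype_of(b'%PDF-1.4 ...'))  # typically 'application/pdf'
print(mimetype_of(b'hello world'))   # typically 'text/plain'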
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
    m = magic.open(magic.MAGIC_MIME_ENCODING)
    m.load()
    return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
    m = magic.Magic(mime_encoding=True)
    return m.from_buffer(blob)
else:
    raise Exception('Unknown magic API')
def guess_encoding(blob)
uses file magic to determine the encoding of the given data blob. :param blob: file content as read by file.read() :type blob: data :returns: encoding :rtype: str
6.513525
6.346446
1.026326