code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
sources = sources or ("config", "jobs", "raw_subqueues")

queues = set()

if "config" in sources and not prefixes:
    # Some queues are explicitly declared in the config (including all root raw queues)
    cfg = context.get_current_config()
    queues_from_config = [
        t.get("queue") for t in (cfg.get("tasks") or {}).values()
        if t.get("queue")
    ]
    queues_from_config += Queue.get_queues_config().keys()
    queues_from_config += [
        t.get("retry_queue") for t in Queue.get_queues_config().values()
        if t.get("retry_queue")
    ]
    queues |= set(queues_from_config)

if "jobs" in sources:
    # This will get all queues from mongodb, including those where we have only non-queued jobs
    for q in context.connections.mongodb_jobs.mrq_jobs.distinct("queue"):
        if prefixes and not any(q.startswith(p) for p in prefixes):
            continue
        queues.add(q)

if "raw_subqueues" in sources:
    for q in Queue.get_queues_config():
        if prefixes and not any(q + "/" == p for p in prefixes):
            continue
        queue_obj = Queue(q)
        if queue_obj.is_raw and queue_obj.has_subqueues:
            # TODO: optimize this with a single SUNION on all keys
            queues |= queue_obj.get_known_subqueues()

return queues
def all_known(cls, sources=None, prefixes=None)
List all currently known queues
4.383365
4.236713
1.034615
# Start with raw queues we know exist from the config
queues = {x: 0 for x in Queue.get_queues_config()}

stats = list(context.connections.mongodb_jobs.mrq_jobs.aggregate([
    {"$match": {"status": "queued"}},
    {"$group": {"_id": "$queue", "jobs": {"$sum": 1}}}
]))

queues.update({x["_id"]: x["jobs"] for x in stats})

return queues
def all(cls)
List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow.
6.386113
4.502153
1.418457
if not self.use_notify():
    return

# Not really useful to send more than 100 notifs (to be configured)
count = min(new_jobs_count, 100)

notify_key = redis_key("notify", self)
context.connections.redis.lpush(notify_key, *([1] * count))
context.connections.redis.expire(
    notify_key,
    max(1, int(context.get_current_config()["max_latency"] * 2)))
def notify(self, new_jobs_count)
We just queued new_jobs_count jobs on this queue, wake up the workers if needed
6.517621
6.239763
1.04453
with context.connections.redis.pipeline(transaction=True) as pipe:
    pipe.delete(self.redis_key)
    pipe.delete(self.redis_key_known_subqueues)
    pipe.execute()
def empty(self)
Empty a queue.
5.919363
5.59044
1.058837
if not self.has_subqueues:
    return set()

return set(context.connections.redis.smembers(self.redis_key_known_subqueues))
def get_known_subqueues(self)
Returns all known subqueues
6.816387
6.271835
1.086825
if self.id.endswith("/"):
    return sum(Queue(q).size() for q in self.get_known_subqueues())

# ZSET
if self.is_sorted:
    return context.connections.redis.zcard(self.redis_key)
# SET
elif self.is_set:
    return context.connections.redis.scard(self.redis_key)
# LIST
else:
    return context.connections.redis.llen(self.redis_key)
def size(self)
Returns the total number of queued jobs on the queue
4.210354
3.743168
1.12481
if len(params_list) == 0:
    return

if self.is_subqueue:
    context.connections.redis.sadd(self.redis_key_known_subqueues, self.id)

# ZSET
if self.is_sorted:
    if not isinstance(params_list, dict) and self.is_timed:
        now = time.time()
        params_list = {x: now for x in params_list}
    context.connections.redis.zadd(self.redis_key, **params_list)
# SET
elif self.is_set:
    context.connections.redis.sadd(self.redis_key, *params_list)
# LIST
else:
    context.connections.redis.rpush(self.redis_key, *params_list)

context.metric("queues.%s.enqueued" % self.id, len(params_list))
context.metric("queues.all.enqueued", len(params_list))
def enqueue_raw_jobs(self, params_list)
Add Jobs to this queue with raw parameters. They are not yet in MongoDB.
2.79513
2.786562
1.003074
if len(params_list) == 0:
    return

# ZSET
if self.is_sorted:
    context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
    context.connections.redis.srem(self.redis_key, *params_list)
else:
    # O(n)! Use with caution.
    for k in params_list:
        context.connections.redis.lrem(self.redis_key, 1, k)

context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list))
def remove_raw_jobs(self, params_list)
Remove jobs from a raw queue with their raw params.
3.634596
3.618056
1.004571
# timed ZSET
if self.is_timed:
    return context.connections.redis.zcount(
        self.redis_key, "-inf", time.time())
# In all other cases, it's the same as .size()
else:
    return self.size()
def count_jobs_to_dequeue(self)
Returns the number of jobs that can be dequeued right now from the queue.
9.616771
8.269295
1.162949
if not self.is_sorted:
    raise Exception("Not a sorted queue")

with context.connections.redis.pipeline(transaction=exact) as pipe:
    interval = old_div(float(stop - start), slices)
    for i in range(0, slices):
        pipe.zcount(self.redis_key,
                    (start + i * interval),
                    "(%s" % (start + (i + 1) * interval))
    if include_inf:
        pipe.zcount(self.redis_key, stop, "+inf")
        pipe.zcount(self.redis_key, "-inf", "(%s" % start)
    data = pipe.execute()

if include_inf:
    return data[-1:] + data[:-1]

return data
def get_sorted_graph( self, start=0, stop=100, slices=100, include_inf=False, exact=False)
Returns a graph of the distribution of jobs in a sorted set
3.319802
3.333002
0.99604
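The ZCOUNT bucket boundaries that get_sorted_graph requests from Redis can be checked standalone. A minimal sketch of the same arithmetic, with illustrative values (start=0, stop=100, slices=4 are assumptions, not from the row above):

start, stop, slices = 0, 100, 4
interval = float(stop - start) / slices
buckets = [(start + i * interval, start + (i + 1) * interval) for i in range(slices)]
print(buckets)  # [(0.0, 25.0), (25.0, 50.0), (50.0, 75.0), (75.0, 100.0)]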
self.graceful_stop = False

def request_shutdown_now():
    self.shutdown_now()

def request_shutdown_graceful():
    # Second time CTRL-C, shutdown now
    if self.graceful_stop:
        self.shutdown_now()
    else:
        self.graceful_stop = True
        self.shutdown_graceful()

# First time CTRL-C, try to shutdown gracefully
gevent.signal(signal.SIGINT, request_shutdown_graceful)

# User (or Heroku) requests a stop now, just mark tasks as interrupted.
gevent.signal(signal.SIGTERM, request_shutdown_now)
def install_signal_handlers(self)
Handle events like Ctrl-C from the command line.
4.582765
4.408781
1.039463
self.desired_commands = commands

target_commands = list(self.desired_commands)

for process in list(self.processes):
    found = False
    for i in range(len(target_commands)):
        if process["command"] == target_commands[i]:
            target_commands.pop(i)
            found = True
            break
    if not found:
        self.stop_process(process, timeout)

# What is left are the commands to add
# TODO: we should only do this once memory conditions allow
for command in target_commands:
    self.spawn(command)
def set_commands(self, commands, timeout=None)
Sets the desired commands for this pool's processes and manages the diff to reach that state
4.462729
4.1122
1.085241
# process_name
# output
# time before starting (wait for port?)
# start_new_session=True : avoid sending parent signals to child
env = dict(os.environ)
env["MRQ_IS_SUBPROCESS"] = "1"
env.update(self.extra_env or {})

# Extract env variables from shell commands.
parts = shlex.split(command)
for p in list(parts):
    if "=" in p:
        env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:]
        parts.pop(0)
    else:
        break

p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd())

self.processes.append({
    "subprocess": p,
    "pid": p.pid,
    "command": command,
    "psutil": psutil.Process(pid=p.pid)
})
def spawn(self, command)
Spawns a new process and adds it to the pool
4.781262
4.605692
1.03812
while True:
    if not self.greenlet_watch:
        break
    if self.stopping:
        gevent.sleep(0.1)
    else:
        gevent.sleep(1)
def wait(self)
Waits for the pool to be fully stopped
6.340652
5.525723
1.147479
for process in list(self.processes):
    self.watch_process(process)

# Cleanup processes
self.processes = [p for p in self.processes if not p.get("dead")]

if self.stopping and len(self.processes) == 0:
    self.stop_watch()
def watch_processes(self)
Manages the status of all the known processes
4.380197
4.165566
1.051525
status = process["psutil"].status()

# TODO: how to avoid zombies?
# print process["pid"], status

if process.get("terminate"):
    if status in ("zombie", "dead"):
        process["dead"] = True
    elif process.get("terminate_at"):
        if time.time() > (process["terminate_at"] + 5):
            log.warning("Process %s had to be sent SIGKILL" % (process["pid"], ))
            process["subprocess"].send_signal(signal.SIGKILL)
        elif time.time() > process["terminate_at"]:
            log.warning("Process %s had to be sent SIGTERM" % (process["pid"], ))
            process["subprocess"].send_signal(signal.SIGTERM)
else:
    if status in ("zombie", "dead"):
        # Restart a new process right away (TODO: sleep a bit? max retries?)
        process["dead"] = True
        self.spawn(process["command"])
    elif status not in ("running", "sleeping"):
        log.warning("Process %s was in status %s" % (process["pid"], status))
def watch_process(self, process)
Manages the status of a single process
3.441214
3.425233
1.004666
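watch_process branches on psutil process statuses; a quick self-contained check of the call it relies on, run against our own PID:

import os
import psutil

# The status() call watch_process branches on.
proc = psutil.Process(pid=os.getpid())
print(proc.status())  # typically 'running' (or 'sleeping' on some platforms)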
self.stopping = True

for process in list(self.processes):
    self.stop_process(process, timeout=timeout)
def stop(self, timeout=None)
Initiates a graceful stop of the processes
5.652634
4.69131
1.204916
process["terminate"] = True
if timeout is not None:
    process["terminate_at"] = time.time() + timeout
process["subprocess"].send_signal(signal.SIGINT)
def stop_process(self, process, timeout=None)
Initiates a graceful stop of one process
3.999955
4.161899
0.961089
for process in list(self.processes):
    process["subprocess"].send_signal(signal.SIGTERM)
self.stop_watch()
def terminate(self)
Terminates the processes right now with a SIGTERM
8.790252
6.939844
1.266635
for process in list(self.processes):
    process["subprocess"].send_signal(signal.SIGKILL)
self.stop_watch()
def kill(self)
Kills the processes right now with a SIGKILL
8.806169
6.961143
1.265046
if self.greenlet_watch:
    self.greenlet_watch.kill(block=False)
    self.greenlet_watch = None
def stop_watch(self)
Stops the periodic watch greenlet, thus the pool itself
4.380088
2.7912
1.569249
if not parser:
    parser = argparse.ArgumentParser()

add_parser_args(parser, config_type)

parser_types = {action.dest: action.type for action in parser._actions if action.dest}

if config_type in ["run"]:
    default_config = parser.parse_args(["notask"]).__dict__
else:
    default_config = parser.parse_args([]).__dict__

# Keys that can't be passed from the command line
default_config["tasks"] = {}
default_config["log_handlers"] = {}
default_config["scheduled_tasks"] = {}

# Only keep values actually passed on the command line
from_args = {}
if "args" in sources:
    cmdline_parser = ArgumentParserIgnoringDefaults(argument_default=argparse.SUPPRESS)
    add_parser_args(cmdline_parser, config_type)
    from_args = cmdline_parser.parse_args().__dict__

# If we were given another config file, use it
if file_path is not None:
    config_file = file_path
elif from_args.get("config"):
    config_file = from_args.get("config")
# If a mrq-config.py file is in the current directory, use it!
elif os.path.isfile(os.path.join(os.getcwd(), "mrq-config.py")):
    config_file = os.path.join(os.getcwd(), "mrq-config.py")
else:
    config_file = None

from_file = {}
if config_file and "file" in sources:
    sys.path.insert(0, os.path.dirname(config_file))
    config_module = __import__(os.path.basename(config_file.replace(".py", "")))
    sys.path.pop(0)
    for k, v in config_module.__dict__.items():
        # We only keep variables starting with an uppercase character.
        if k[0].isupper():
            from_file[k.lower()] = v

# Merge the config in the order given by the user
merged_config = default_config
config_keys = set(list(default_config.keys()) + list(from_file.keys()))

for part in sources:
    for name in config_keys:
        if part == "env":
            value = os.environ.get(env_prefix + name.upper())
            if value:
                if name == "queues":
                    value = re.split(r"\s+", value)
                if parser_types.get(name):
                    value = parser_types[name](value)
                merged_config[name] = value
        elif part == "args" and name in from_args:
            merged_config[name] = from_args[name]
        elif part == "file" and name in from_file:
            merged_config[name] = from_file[name]

if extra:
    merged_config.update(extra)

if merged_config["profile"]:
    import cProfile
    import pstats
    profiler = cProfile.Profile()
    profiler.enable()

    def print_profiling():
        pstats.Stats(profiler).sort_stats("cumulative").print_stats()

    atexit.register(print_profiling)

if merged_config["version"]:
    print("MRQ version: %s" % VERSION)
    print("Python version: %s" % sys.version)
    sys.exit(1)

if "no_import_patch" in from_args:
    print("WARNING: --no_import_patch will be deprecated in MRQ 1.0!")

return merged_config
def get_config( sources=( "file", "env"), env_prefix="MRQ_", file_path=None, parser=None, extra=None, config_type=None)
Returns a config dict merged from several possible sources
2.555183
2.562063
0.997315
ms = []
while len(message) > max_length:
    ms.append(message[:max_length])
    message = message[max_length:]
ms.append(message)
return ms
def split_message(message, max_length)
Split large message into smaller messages each smaller than the max_length.
2.090393
1.915882
1.091087
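A quick usage check for split_message; the function is repeated inline so the snippet runs on its own:

def split_message(message, max_length):
    ms = []
    while len(message) > max_length:
        ms.append(message[:max_length])
        message = message[max_length:]
    ms.append(message)
    return ms

# Chunks are max_length long except the final remainder.
print(split_message("abcdefgh", 3))  # ['abc', 'def', 'gh']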
for plugin in self.config.plugins.plugins:
    if plugin.name == "capture":
        return plugin
return None
def _get_capture_plugin(self)
:rtype: nose.plugins.capture.Capture
4.282941
3.592779
1.192097
desc = get_message_description(self.linter, msg.msg_id)
self.tc.message('inspectionType',
                id=msg.msg_id,
                name=msg.symbol,
                description=desc,
                category=msg.category)
def report_message_type(self, msg)
Issues an `inspectionType` service message to define generic properties
of a given PyLint message type.

:param utils.Message msg: a PyLint message
9.298177
6.257073
1.486026
if msg.msg_id not in self.msg_types:
    self.report_message_type(msg)
    self.msg_types.add(msg.msg_id)

self.tc.message('inspection',
                typeId=msg.msg_id,
                message=msg.msg,
                file=os.path.relpath(msg.abspath).replace('\\', '/'),
                line=str(msg.line),
                SEVERITY=TC_SEVERITY.get(msg.category))
def handle_message(self, msg)
Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.

:param utils.Message msg: a PyLint message
5.961584
4.420784
1.348536
try:
    score = self.linter.stats['global_note']
except (AttributeError, KeyError):
    pass
else:
    self.tc.message('buildStatisticValue',
                    key='PyLintScore',
                    value=str(score))
def display_reports(self, layout)
Issues the final PyLint score as a TeamCity build statistic value
12.900911
4.583036
2.814927
if items is not None and isinstance(items, list):
    result = []
    for item in items:
        if isinstance(item, BaseButton):
            result.append(item)
        elif isinstance(item, dict):
            if item.get('type') in ['web_url', 'postback', 'phone_number', 'element_share']:
                type = item.get('type')
                title = item.get('title')
                value = item.get('value', item.get('url', item.get('payload')))
                if type == 'web_url':
                    result.append(ButtonWeb(title=title, url=value))
                elif type == 'postback':
                    result.append(ButtonPostBack(title=title, payload=value))
                elif type == 'phone_number':
                    result.append(ButtonPhoneNumber(title=title, payload=value))
                elif type == 'element_share':
                    result.append(ButtonShare())
            else:
                raise ValueError('Invalid button type')
        else:
            raise ValueError('Invalid buttons variables')
    return result
else:
    return items
def convert_shortcut_buttons(items)
support shortcut buttons [{'type':'web_url', 'title':'open web url', 'value':'https://~~'}]
1.983414
1.881614
1.054102
scope = scope.lower()

if scope == 'after_send':
    self._after_send = callback
    return

if scope not in Page.WEBHOOK_ENDPOINTS:
    raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS))

self._webhook_handlers[scope] = callback
def set_webhook_handler(self, scope, callback)
Allows adding a webhook_handler as an alternative to the decorators
4.052561
3.740683
1.083375
if items is not None and isinstance(items, list):
    result = []
    for item in items:
        if isinstance(item, QuickReply):
            result.append(item)
        elif isinstance(item, dict):
            result.append(QuickReply(title=item.get('title'),
                                     payload=item.get('payload')))
        else:
            raise ValueError('Invalid quick_replies variables')
    return result
else:
    return items
def convert_shortcut_quick_reply(items)
support shortcut [{'title':'title', 'payload':'payload'}]
2.262841
2.11098
1.071939
page.send(recipient, Template.Buttons("hello", [
    Template.ButtonWeb("Open Web URL", "https://www.oculus.com/en-us/rift/"),
    Template.ButtonPostBack("trigger Postback", "DEVELOPED_DEFINED_PAYLOAD"),
    Template.ButtonPhoneNumber("Call Phone Number", "+16505551234")
]))
def send_button(recipient)
Shortcuts are supported
page.send(recipient, Template.Buttons("hello", [
    {'type': 'web_url', 'title': 'Open Web URL', 'value': 'https://www.oculus.com/en-us/rift/'},
    {'type': 'postback', 'title': 'trigger Postback', 'value': 'DEVELOPED_DEFINED_PAYLOAD'},
    {'type': 'phone_number', 'title': 'Call Phone Number', 'value': '+16505551234'},
]))
4.968211
1.76725
2.811266
page.send(recipient,
          "What's your favorite movie genre?",
          quick_replies=[QuickReply(title="Action", payload="PICK_ACTION"),
                         QuickReply(title="Comedy", payload="PICK_COMEDY")],
          metadata="DEVELOPER_DEFINED_METADATA")
def send_quick_reply(recipient)
shortcuts are supported
page.send(recipient, "What's your favorite movie genre?",
          quick_replies=[{'title': 'Action', 'payload': 'PICK_ACTION'},
                         {'title': 'Comedy', 'payload': 'PICK_COMEDY'}],
          metadata="DEVELOPER_DEFINED_METADATA")
3.311775
1.589471
2.083571
with rasterio.open(input) as src:
    return [[window, ij] for ij, window in src.block_windows()]
def getWindows(input)
Get a source's windows
5.970634
5.543605
1.077031
shapes = np.array([a.shape for a in arrays])

if not np.all(np.roll(shapes[:, 1:], 1, axis=0) == shapes[:, 1:]):
    raise ValueError(
        "All input arrays must have the same height and width for this mode"
    )

width = arrays[0].shape[-1]
height = arrays[0].shape[-2]

return np.array([a for subarray in arrays for a in subarray]).reshape(
    shapes[:, 0].sum(), height, width
)
def array_stack(arrays)
Stack arrays
3.170581
3.214628
0.986298
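For same-shaped inputs, the flatten-and-reshape above is equivalent to concatenating along the band axis; a self-contained shape check (the array shapes are illustrative):

import numpy as np

# Two stacks of bands with equal height/width: (2, 4, 5) and (3, 4, 5).
a = np.zeros((2, 4, 5))
b = np.ones((3, 4, 5))

# array_stack merges the band axes, matching a plain concatenate on axis 0.
stacked = np.concatenate([a, b], axis=0)
print(stacked.shape)  # (5, 4, 5)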
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output
def read_function(data, window, ij, g_args)
Takes an array, and sets any value above the mean to the max, the rest to 0
6.378129
4.499015
1.417672
@wraps(func)
def wrapper(*args, **kwds):
    try:
        return func(*args, **kwds)
    except Exception:
        raise MuchoChildError()

return wrapper
def tb_capture(func)
A decorator which captures tracebacks raised in worker tasks.
Inspired by an example in https://bugs.python.org/issue13831.
This decorator wraps rio-mucho worker tasks.

Parameters
----------
func : function
    A function to be decorated.

Returns
-------
func
6.054983
5.954446
1.016884
global global_args
global srcs

global_args = g_args
srcs = [rasterio.open(i) for i in inpaths]
def init_worker(inpaths, g_args)
The multiprocessing worker initializer

Parameters
----------
inpaths : list of str
    A list of dataset paths.
g_args : dict
    Global arguments.

Returns
-------
None
5.69706
7.052148
0.807847
if processes == 1:
    self.pool = MockTub(init_worker, (self.inpaths, self.global_args))
else:
    self.pool = Pool(processes, init_worker, (self.inpaths, self.global_args))

self.options["transform"] = guard_transform(self.options["transform"])

if self.mode == "manual_read":
    reader_worker = manual_reader(self.run_function)
elif self.mode == "array_read":
    reader_worker = array_reader(self.run_function)
else:
    reader_worker = simple_reader(self.run_function)

if isinstance(self.outpath_or_dataset, rasterio.io.DatasetWriter):
    destination = self.outpath_or_dataset
else:
    destination = rasterio.open(self.outpath_or_dataset, "w", **self.options)

# Open an output file, work through the function in parallel,
# and write out the data.
with destination as dst:
    for data, window in self.pool.imap_unordered(reader_worker, self.windows):
        dst.write(data, window=window)

self.pool.close()
self.pool.join()
def run(self, processes=4)
TODO
3.810249
3.733601
1.020529
'''serialize only with letters, numbers and _'''
if isinstance(l, str):
    return l
elif isinstance(l, list):
    return '%s_%s_' % (l[0], ''.join(map(safe_serialize_type, l[1:])))
else:
    return str(l)
def safe_serialize_type(l)
serialize only with letters, numbers and _
4.278357
3.077186
1.390347
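A standalone check of safe_serialize_type (repeated inline so it runs as-is); nested type lists flatten into identifier-safe strings:

def safe_serialize_type(l):
    if isinstance(l, str):
        return l
    elif isinstance(l, list):
        return '%s_%s_' % (l[0], ''.join(map(safe_serialize_type, l[1:])))
    else:
        return str(l)

print(safe_serialize_type('Int'))            # Int
print(safe_serialize_type(['List', 'Int']))  # List_Int_
print(safe_serialize_type(['Dict', 'Str', ['List', 'Int']]))  # Dict_StrList_Int__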
'''s[<int>] where s is a Tuple[T1..]'''
return n.args and n.args[-1].type == 'index' and \
    general_type(n.args[-1].sequence.pseudo_type) == 'Tuple' and \
    n.args[-1].index.type == 'int' and \
    (n.args[-1].index.value == -1 or
     n.args[-1].index.value == len(n.args[-1].sequence.pseudo_type) - 2)
def tuple_index(self, n)
s[<int>] where s is a Tuple[T1..]
5.487934
3.842568
1.428194
'''
generates code based on templates and gen functions
defined in the <x> lang generator
'''
for middleware in DEFAULT_MIDDLEWARES + self.middlewares:
    tree = middleware.process(tree)  # changed in place!!

original = self._generate_node(tree)

# first n lines n dependencies
# after that additional code
if self.a and tree.type == 'module':
    p = original.split('\n')
    r = '\n'.join(p[:len(tree.dependencies)] +
                  (['\n'] if tree.dependencies else []) +
                  self.a + ['\n'] +
                  p[len(tree.dependencies):]) + '\n'
else:
    r = original

r = re.sub(CLOSING_CURLY_ENDLINES, r'}\n\2}', r)
r = re.sub(JS_BRACKET, r'}\1', r)
return re.sub(TOO_MANY_ENDLINES, r'\n\n', r)
def generate(self, tree)
generates code based on templates and gen functions defined in the <x> lang generator
8.892097
6.288652
1.413991
'''A shortcut for a method call, expands a str receiver to a identifier'''
if not isinstance(receiver, Node):
    receiver = local(receiver)
return Node('method_call',
            receiver=receiver,
            message=message,
            args=args,
            pseudo_type=pseudo_type)
def method_call(receiver, message, args, pseudo_type=None)
A shortcut for a method call, expands a str receiver to a identifier
6.980233
3.027228
2.305817
'''A shortcut for a call with an identifier callee'''
if not isinstance(function, Node):
    function = local(function)
return Node('call', function=function, args=args, pseudo_type=pseudo_type)
def call(function, args, pseudo_type=None)
A shortcut for a call with an identifier callee
6.97404
3.775275
1.847293
'''Expand to a literal node if a basic type otherwise just returns the node'''
if isinstance(value, Node):
    return value
elif isinstance(value, str):
    return Node('string', value=value, pseudo_type='String')
# check bool before int: isinstance(True, int) is True in Python,
# so the int branch would otherwise swallow booleans
elif isinstance(value, bool):
    return Node('boolean', value=str(value).lower(), pseudo_type='Boolean')
elif isinstance(value, int):
    return Node('int', value=value, pseudo_type='Int')
elif isinstance(value, float):
    return Node('float', value=value, pseudo_type='Float')
elif value is None:
    return Node('null', pseudo_type='Void')
else:
    1 / 0  # unsupported literal type: crash loudly
def to_node(value)
Expand to a literal node if a basic type otherwise just returns the node
2.856246
2.069997
1.379831
'''
create a function that transforms a method to a binary op

often we need to convert a pseudo method <receiver>.<message>(<z>)
to a binary op <receiver> <op> <message>
that's a decorator that helps for that
'''
def transformer(receiver, param, pseudo_type):
    if not reversed:
        return Node('binary_op', op=op, left=receiver, right=param, pseudo_type=pseudo_type)
    return Node('binary_op', op=op, left=param, right=receiver, pseudo_type=pseudo_type)

return transformer
def to_op(op, reversed=False)
create a function that transforms a method to a binary op often we need to convert a pseudo method <receiver>.<message>(<z>) to a binary op <receiver> <op> <message> that's a decorator that helps for that
7.400874
2.450622
3.019998
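A hedged usage sketch for to_op, assuming the Node and to_node helpers from the rows above are importable (not runnable standalone; the '**' mapping is an illustration, not from the dataset):

# Hypothetical: turn a pseudo method like x.pow(y) into the binary op x ** y.
pow_op = to_op('**')
node = pow_op(to_node(2), to_node(3), 'Int')
# node.op == '**', node.left.value == 2, node.right.value == 3
# to_op('**', reversed=True) would swap receiver and param instead.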
'''
an expression leaking ... assignment nodes into the nearest block list of nodes
c++ guys, stay calm
'''
# input(node.y)
args = [node.receiver] + node.args if node.type == 'standard_method_call' else node.args
z = z(module, name, args)
if context == 'expression':
    if isinstance(z, NormalLeakingNode):
        leaked_nodes, exp = z.as_expression()
    else:
        leaked_nodes, exp = z.as_expression()
        zz = local(z.temp_name(getattr(z, 'default', '')), node.pseudo_type)
        leaked_nodes = z.as_assignment(zz)
        exp = local(zz, node.pseudo_type)
    if exp is None or exp.pseudo_type == 'Void':
        raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
    self.leaked_nodes += leaked_nodes
    return exp
elif context == 'assignment':
    if isinstance(z, NormalLeakingNode):
        leaked_nodes, exp = z.as_expression()
        if exp is None or exp.pseudo_type == 'Void':
            raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
        self.leaked_nodes += leaked_nodes
        return assignment(data[0], exp)
    else:
        self.leaked_nodes += z.as_assignment(data[0])
        return None
elif context == 'block':
    leaked_nodes, exp = z.as_expression()
    self.leaked_nodes += leaked_nodes
    return exp
def leaking(self, z, module, name, node, context, *data)
an expression leaking ... assignment nodes into the nearest block list of nodes c++ guys, stay calm
4.87975
3.640797
1.340297
'''
the heart of api translation

dsl function or <z>(<arg>, ..) can be expanded,
<z> can be just a name for a global function, or #name for method,
<arg> can be %{self} for self or %{n} for nth arg
'''
if callable(api):
    if receiver:
        return api(receiver, *(args + [pseudo_type]))
    else:
        return api(*(args + [pseudo_type]))
elif isinstance(api, str):
    if '(' in api:
        call_api, arg_code = api[:-1].split('(')
        new_args = [self._parse_part(a.strip(), receiver, args, equivalent)
                    for a in arg_code.split(',')]
    else:
        call_api, arg_code = api, ''
        new_args = args

    if '#' in call_api:
        a, b = call_api.split('#')
        method_receiver = self._parse_part(a, receiver, args, equivalent) if a else receiver
        return method_call(method_receiver, b, new_args, pseudo_type=pseudo_type)
    elif '.' in call_api:
        a, b = call_api.split('.')
        static_receiver = self._parse_part(a, receiver, args, equivalent) if a else receiver
        if b[-1] != '!':
            return Node('static_call',
                        receiver=static_receiver,
                        message=b,
                        args=new_args,
                        pseudo_type=pseudo_type)
        else:
            return Node('attr',
                        object=static_receiver,
                        attr=b[:-1],
                        pseudo_type=pseudo_type)
    else:
        if receiver:
            return call(call_api, [receiver] + new_args, pseudo_type=pseudo_type)
        else:
            return call(call_api, new_args, pseudo_type=pseudo_type)
else:
    raise PseudoDSLError('%s not supported by api dsl' % str(api))
def _expand_api(self, api, receiver, args, pseudo_type, equivalent)
the heart of api translation dsl function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg
3.762345
2.363836
1.591627
'''
generate output code for main in `language`

`main` is a dict/Node or a list of dicts/Nodes with pseudo ast e.g.

> print(generate_main({'type': 'int', 'value': 0, 'pseudo_type': 'Int'}, 'rb'))
2

> print(generate_main([pseudo.pseudo_tree.to_node('a'), pseudo.pseudo_tree.to_node(0)], 'js'))
'a';
0;
'''
base = {'type': 'module', 'custom_exceptions': [], 'definitions': [],
        'constants': [], 'main': [], 'pseudo_type': 'Void'}
base_node = pseudo.loader.convert_to_syntax_tree(base)

if isinstance(main, dict):
    base['main'] = [main]
elif isinstance(main, list):
    if main and isinstance(main[0], dict):
        base['main'] = main
    else:
        base_node.main = main
elif isinstance(main, pseudo.pseudo_tree.Node):
    base_node.main = [main]

if base['main']:
    q = pseudo.loader.convert_to_syntax_tree(base)
else:
    q = base_node

return generate(q, language)
def generate_main(main, language)
generate output code for main in `language`

`main` is a dict/Node or a list of dicts/Nodes with pseudo ast e.g.

> print(generate_main({'type': 'int', 'value': 0, 'pseudo_type': 'Int'}, 'rb'))
2

> print(generate_main([pseudo.pseudo_tree.to_node('a'), pseudo.pseudo_tree.to_node(0)], 'js'))
'a';
0;
4.334651
2.118288
2.046299
'''
generate output code in `language`
converts yaml input to a Node-based pseudo internal tree and passes it to `generate`
'''
return pseudo.generate(pseudo.loader.as_tree(pseudo_ast), language)
def generate_from_yaml(pseudo_ast, language)
generate output code in `language`
converts yaml input to a Node-based pseudo internal tree and passes it to `generate`
27.902048
5.734298
4.865817
'''
generate output code in `language`

`pseudo_ast` can be a plain `dict` with ast data or it can use
the internal `pseudo` `Node(type, **fields)` format

if you want to play with it, you can use `generate_main` which expects
just a dict node / a list of dict nodes and a language

`language` can be 'py', 'python', 'rb', 'ruby', 'javascript', 'js',
'cs', 'csharp', 'go' or 'cpp'
'''
if isinstance(pseudo_ast, dict):
    pseudo_ast = pseudo.loader.convert_to_syntax_tree(pseudo_ast)
translated_ast = API_TRANSLATORS[language](pseudo_ast).api_translate()
return GENERATORS[language]().generate(translated_ast)
def generate(pseudo_ast, language)
generate output code in `language`

`pseudo_ast` can be a plain `dict` with ast data or it can use
the internal `pseudo` `Node(type, **fields)` format

if you want to play with it, you can use `generate_main` which expects
just a dict node / a list of dict nodes and a language

`language` can be 'py', 'python', 'rb', 'ruby', 'javascript', 'js',
'cs', 'csharp', 'go' or 'cpp'
9.130462
2.148498
4.249696
@functools.wraps(func)
def wrapper(*args, **kwargs):
    return normalize('NFC', func(*args, **kwargs))

return wrapper
def sanitize(func)
NFC is the normalization form recommended by W3C.
3.233988
2.582495
1.252272
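The NFC normalization the decorator applies can be seen standalone:

from unicodedata import normalize

# 'e' followed by a combining acute accent composes into a single 'é' under NFC.
decomposed = "e\u0301"
print(len(decomposed), len(normalize("NFC", decomposed)))  # 2 1
print(normalize("NFC", decomposed) == "\u00e9")            # True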
if not raw_data:
    return six.text_type()

if isinstance(raw_data, six.text_type):
    return raw_data.strip()

if six.PY2:
    try:
        return six.text_type(raw_data, encoding, errors).strip()
    except LookupError:
        return six.text_type(raw_data, "utf-8", errors).strip()

if six.PY3:
    try:
        return six.text_type(raw_data, encoding).strip()
    except (LookupError, UnicodeDecodeError):
        return six.text_type(raw_data, "utf-8", errors).strip()
def ported_string(raw_data, encoding='utf-8', errors='ignore')
Give as input raw data and output a str in Python 3 and unicode in Python 2.

Args:
    raw_data: Python 2 str, Python 3 bytes or str to port
    encoding: string giving the name of an encoding
    errors: this specifies the treatment of characters
        which are invalid in the input encoding

Returns:
    str (Python 3) or unicode (Python 2)
1.868749
1.934311
0.966106
if not header:
    return six.text_type()

output = six.text_type()

try:
    for d, c in decode_header(header):
        c = c if c else 'utf-8'
        output += ported_string(d, c, 'ignore')
# Header parsing failed, when header has charset Shift_JIS
except (HeaderParseError, UnicodeError):
    log.error("Failed decoding header part: {}".format(header))
    output += header

return output
def decode_header_part(header)
Given a raw header, returns a decoded header.

Args:
    header (string): header to decode

Returns:
    str (Python 3) or unicode (Python 2)
6.8429
7.729196
0.885331
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")

if six.PY2:
    if not isinstance(data, str):
        data = data.encode("utf-8")
elif six.PY3:
    if not isinstance(data, bytes):
        data = data.encode("utf-8")

# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()

# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()

# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()

# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()

return Hashes(md5, sha1, sha256, sha512)
def fingerprints(data)
This function returns the fingerprints of data.

Args:
    data (string): raw data

Returns:
    namedtuple: fingerprints md5, sha1, sha256, sha512
1.484235
1.483357
1.000592
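A self-contained cross-check of the digests fingerprints returns, using hashlib directly:

import hashlib

data = "hello".encode("utf-8")
# The md5 field of fingerprints("hello") should match this well-known digest.
print(hashlib.md5(data).hexdigest())   # 5d41402abc4b2a76b9719d911017c592
print(hashlib.sha1(data).hexdigest()[:8])  # aaf4c61d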
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]

try:
    if six.PY2:
        with open(os.devnull, "w") as devnull:
            out = subprocess.Popen(
                command, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=devnull)
    elif six.PY3:
        out = subprocess.Popen(
            command, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
    message = "To use this function you must install 'msgconvert' tool"
    log.exception(message)
    raise MailParserOSError(message)
else:
    stdoutdata, _ = out.communicate()
    return temp, stdoutdata.decode("utf-8").strip()
finally:
    os.close(temph)
def msgconvert(email)
Exec the msgconvert tool to convert an Outlook msg mail into eml format.

Args:
    email (string): file path of Outlook msg mail

Returns:
    tuple with file path of converted mail and standard output data
    (unicode Python 2, str Python 3)
3.260017
2.988379
1.090898
values_by_clause = {}

for pattern in RECEIVED_COMPILED_LIST:
    matches = [match for match in pattern.finditer(received)]

    if len(matches) == 0:
        # no matches for this clause, but it's ok! keep going!
        log.debug("No matches found for %s in %s" % (
            pattern.pattern, received))
        continue
    elif len(matches) > 1:
        # uh, can't have more than one of each clause in a received.
        # so either there's more than one or the current regex is wrong
        msg = "More than one match found for %s in %s" % (
            pattern.pattern, received)
        log.error(msg)
        raise MailParserReceivedParsingError(msg)
    else:
        # otherwise we have one matching clause!
        log.debug("Found one match for %s in %s" % (
            pattern.pattern, received))
        match = matches[0].groupdict()
        if six.PY2:
            values_by_clause[match.keys()[0]] = match.values()[0]
        elif six.PY3:
            key = list(match.keys())[0]
            value = list(match.values())[0]
            values_by_clause[key] = value

if len(values_by_clause) == 0:
    # we weren't able to match anything...
    msg = "Unable to match any clauses in %s" % (received)
    log.error(msg)
    raise MailParserReceivedParsingError(msg)

return values_by_clause
def parse_received(received)
Parse a single received header. Return a dictionary of values by clause.

Arguments:
    received {str} -- single received header

Raises:
    MailParserReceivedParsingError -- Raised when a received header
        cannot be parsed

Returns:
    dict -- values by clause
3.077296
2.771774
1.110226
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))

for idx, received in enumerate(receiveds):
    log.debug("Parsing received {}/{}".format(idx + 1, n))
    log.debug("Try to parse {!r}".format(received))
    try:
        # try to parse the current received header...
        values_by_clause = parse_received(received)
    except MailParserReceivedParsingError:
        # if we can't, let's append the raw
        parsed.append({'raw': received})
    else:
        # otherwise append the full values_by_clause dict
        parsed.append(values_by_clause)

log.debug("len(receiveds) %s, len(parsed) %s" % (
    len(receiveds), len(parsed)))

if len(receiveds) != len(parsed):
    # something really bad happened,
    # so just return raw receiveds with hop indices
    log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, "
              "parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
    return receiveds_not_parsed(receiveds)
else:
    # all's good! we have parsed or raw receiveds for each received header
    return receiveds_format(parsed)
def receiveds_parsing(receiveds)
This function parses the receiveds headers.

Args:
    receiveds (list): list of raw receiveds headers

Returns:
    a list of parsed receiveds headers with first hop in first position
4.360999
4.264802
1.022556
log.debug("Receiveds for this email are not parsed")
output = []
counter = Counter()

for i in receiveds[::-1]:
    j = {"raw": i.strip()}
    j["hop"] = counter["hop"] + 1
    counter["hop"] += 1
    output.append(j)
else:
    return output
def receiveds_not_parsed(receiveds)
If receiveds are not parsed, makes a new structure with raw field.
It's useful to have the same structure of receiveds parsed.

Args:
    receiveds (list): list of raw receiveds headers

Returns:
    a list of not parsed receiveds headers with first hop in first position
6.454787
5.835791
1.106069
log.debug("Receiveds for this email are parsed")
output = []
counter = Counter()

for i in receiveds[::-1]:
    # Clean strings
    j = {k: v.strip() for k, v in i.items() if v}

    # Add hop
    j["hop"] = counter["hop"] + 1

    # Add UTC date
    if i.get("date"):
        # Modify date to manage strange header like:
        # "for <[email protected]>; Tue, 7 Mar 2017 14:29:24 -0800",
        i["date"] = i["date"].split(";")[-1]
        try:
            j["date_utc"], _ = convert_mail_date(i["date"])
        except TypeError:
            j["date_utc"] = None

    # Add delay
    size = len(output)
    now = j.get("date_utc")
    if size and now:
        before = output[counter["hop"] - 1].get("date_utc")
        if before:
            j["delay"] = (now - before).total_seconds()
        else:
            j["delay"] = 0
    else:
        j["delay"] = 0

    # append result
    output.append(j)

    # new hop
    counter["hop"] += 1
else:
    for i in output:
        if i.get("date_utc"):
            i["date_utc"] = i["date_utc"].isoformat()
    else:
        return output
def receiveds_format(receiveds)
Given a list of received hops, adds metadata and reformats field values.

Args:
    receiveds (list): list of receiveds hops already formatted

Returns:
    list of receiveds reformatted and with new fields
4.71106
4.799098
0.981655
header = message.get(name)
log.debug("Getting header {!r}: {!r}".format(name, header))
if header:
    return decode_header_part(header)
return six.text_type()
def get_header(message, name)
Gets an email.message.Message and a header name and returns
the mail header decoded with the correct charset.

Args:
    message (email.message.Message): email message object
    name (string): header to get

Returns:
    decoded header
4.946934
5.779155
0.855996
if complete:
    log.debug("Get all headers")
    all_headers_keys = {i.lower() for i in message.keys()}
    all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys
else:
    log.debug("Get only mains headers")
    all_parts = ADDRESSES_HEADERS | OTHERS_PARTS

log.debug("All parts to get: {}".format(", ".join(all_parts)))

return all_parts
def get_mail_keys(message, complete=True)
Given an email.message.Message, return a set with all email parts to get.

Args:
    message (email.message.Message): email message object
    complete (bool): if True returns all email headers

Returns:
    set with all email parts
3.980868
3.77286
1.055133
# pragma: no cover
if not os.path.exists(path):
    os.makedirs(path)

sample = os.path.join(path, filename)

if binary:
    with open(sample, "wb") as f:
        f.write(base64.b64decode(payload))
else:
    with open(sample, "w") as f:
        f.write(payload)
def write_sample(binary, payload, path, filename)
This function writes a sample on file system.

Args:
    binary (bool): True if it's a binary file
    payload: payload of sample, in base64 if it's a binary
    path (string): path of file
    filename (string): name of file
1.960288
2.092752
0.936703
log.debug("Parsing email from file object")
try:
    fp.seek(0)
except IOError:
    # When stdout is a TTY it's a character device
    # and it's not seekable, you cannot seek in a TTY.
    pass
finally:
    s = fp.read()
    return cls.from_string(s)
def from_file_obj(cls, fp)
Init a new object from a file-like object.
Not for Outlook msg.

Args:
    fp (file-like object): file-like object of raw email

Returns:
    Instance of MailParser
6.756511
6.793418
0.994567
log.debug("Parsing email from file {!r}".format(fp))

with ported_open(fp) as f:
    message = email.message_from_file(f)

if is_outlook:
    log.debug("Removing temp converted Outlook email {!r}".format(fp))
    os.remove(fp)

return cls(message)
def from_file(cls, fp, is_outlook=False)
Init a new object from a file path.

Args:
    fp (string): file path of raw email
    is_outlook (boolean): if True is an Outlook email

Returns:
    Instance of MailParser
4.549876
5.086548
0.894492
log.debug("Parsing email from file Outlook")
f, _ = msgconvert(fp)
return cls.from_file(f, True)
def from_file_msg(cls, fp)
Init a new object from an Outlook message file,
mime type: application/vnd.ms-outlook

Args:
    fp (string): file path of raw Outlook email

Returns:
    Instance of MailParser
19.2679
17.894175
1.076769
log.debug("Parsing email from string")
message = email.message_from_string(s)
return cls(message)
def from_string(cls, s)
Init a new object from a string.

Args:
    s (string): raw email

Returns:
    Instance of MailParser
6.542132
4.811327
1.359736
log.debug("Parsing email from bytes")
if six.PY2:
    raise MailParserEnvironmentError(
        "Parsing from bytes is valid only for Python 3.x version")
message = email.message_from_bytes(bt)
return cls(message)
def from_bytes(cls, bt)
Init a new object from bytes.

Args:
    bt (bytes-like object): raw email as bytes-like object

Returns:
    Instance of MailParser
7.371872
6.226515
1.183948
log.debug("Reset all variables")

self._attachments = []
self._text_plain = []
self._text_html = []
self._defects = []
self._defects_categories = set()
self._has_defects = False
def _reset(self)
Reset the state of mail object.
6.116976
4.996594
1.224229
part_defects = {}

for e in part.defects:
    defects = "{}: {}".format(e.__class__.__name__, e.__doc__)
    self._defects_categories.add(e.__class__.__name__)
    part_defects.setdefault(part_content_type, []).append(defects)
    log.debug("Added defect {!r}".format(defects))

# Tag mail with defect
if part_defects:
    self._has_defects = True

    # Save all defects
    self._defects.append(part_defects)
def _append_defects(self, part, part_content_type)
Add new defects and defects categories to object attributes.
The defects are a list of all the problems found
when parsing this message.

Args:
    part (string): mail part
    part_content_type (string): content type of part
3.899204
3.989993
0.977246
mail = {}
keys = get_mail_keys(self.message, complete)

for i in keys:
    log.debug("Getting header or part {!r}".format(i))
    value = getattr(self, i)
    if value:
        mail[i] = value

# add defects
mail["has_defects"] = self.has_defects
if self.has_defects:
    mail["defects"] = self.defects
    mail["defects_categories"] = list(self.defects_categories)

return mail
def _make_mail(self, complete=True)
This method assigns the right values to all tokens of email.
Returns a parsed object.

Keyword Arguments:
    complete {bool} -- If True returns all mails parts (default: {True})

Returns:
    dict -- Parsed email object
3.657929
3.92539
0.931864
if not self.message:
    return self

# reset and start parsing
self._reset()
parts = []  # Normal parts plus defects

# walk all mail parts to search defects
for p in self.message.walk():
    part_content_type = p.get_content_type()
    self._append_defects(p, part_content_type)
    parts.append(p)

# If defects are in epilogue defects get epilogue
if self.defects_categories & EPILOGUE_DEFECTS:
    log.debug("Found defects in emails")
    epilogue = find_between(
        self.message.epilogue,
        "{}".format("--" + self.message.get_boundary()),
        "{}".format("--" + self.message.get_boundary() + "--"))

    try:
        p = email.message_from_string(epilogue)
        parts.append(p)
    except TypeError:
        log.debug("Failed to get epilogue part for TypeError")
    except Exception:
        log.error("Failed to get epilogue part. Check raw mail.")

# walk all mail parts
for i, p in enumerate(parts):
    if not p.is_multipart():
        filename = decode_header_part(p.get_filename())
        charset = p.get_content_charset('utf-8')
        charset_raw = p.get_content_charset()
        log.debug("Charset {!r} part {!r}".format(charset, i))

        if filename:
            log.debug("Email part {!r} is an attachment".format(i))
            log.debug("Filename {!r} part {!r}".format(filename, i))
            binary = False
            mail_content_type = ported_string(p.get_content_type())
            log.debug("Mail content type {!r} part {!r}".format(
                mail_content_type, i))
            transfer_encoding = ported_string(
                p.get('content-transfer-encoding', '')).lower()
            log.debug("Transfer encoding {!r} part {!r}".format(
                transfer_encoding, i))
            content_id = ported_string(p.get('content-id'))
            log.debug("content-id {!r} part {!r}".format(
                content_id, i))

            if transfer_encoding == "base64" or (
                    transfer_encoding == "quoted-printable" and
                    "application" in mail_content_type):
                payload = p.get_payload(decode=False)
                binary = True
                log.debug("Filename {!r} part {!r} is binary".format(
                    filename, i))
            else:
                payload = ported_string(
                    p.get_payload(decode=True), encoding=charset)
                log.debug(
                    "Filename {!r} part {!r} is not binary".format(
                        filename, i))

            self._attachments.append({
                "filename": filename,
                "payload": payload,
                "binary": binary,
                "mail_content_type": mail_content_type,
                "content-id": content_id,
                "charset": charset_raw,
                "content_transfer_encoding": transfer_encoding})
        else:
            log.debug("Email part {!r} is not an attachment".format(i))
            payload = ported_string(
                p.get_payload(decode=True), encoding=charset)
            if payload:
                if p.get_content_subtype() == 'html':
                    self._text_html.append(payload)
                else:
                    self._text_plain.append(payload)
else:
    # Parsed object mail with all parts
    self._mail = self._make_mail()

    # Parsed object mail with mains parts
    self._mail_partial = self._make_mail(complete=False)
def parse(self)
This method parses the raw email and makes the tokens.

Returns:
    Instance of MailParser with raw email parsed
3.072678
2.990541
1.027466
log.debug("Trust string is {!r}".format(trust))

if not trust.strip():
    return

received = self.message.get_all("received", [])

for i in received:
    i = ported_string(i)
    if trust in i:
        log.debug("Trust string {!r} is in {!r}".format(trust, i))
        check = REGXIP.findall(i[0:i.find("by")])
        if check:
            try:
                ip_str = six.text_type(check[-1])
                log.debug("Found sender IP {!r} in {!r}".format(
                    ip_str, i))
                ip = ipaddress.ip_address(ip_str)
            except ValueError:
                return
            else:
                if not ip.is_private:
                    log.debug("IP {!r} not private".format(ip_str))
                    return ip_str
def get_server_ipaddress(self, trust)
Return the ip address of sender

Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP addresses
in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in the chain. Instead,
our method is as follows. First we trust the sender IP reported by
our mail server in the Received headers, and if the previous relay
IP address is on our trust list (e.g. other well-known mail services),
we continue to follow the previous Received line, till we reach the
first unrecognized IP address in the email header.

From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar

In our case we trust only our mail server with the trust string.

Args:
    trust (string): string that identifies our mail server

Returns:
    string with the ip address
4.25929
3.755761
1.134068
output = []
for i in self.message.get_all("received", []):
    output.append(decode_header_part(i))
return output
def received_raw(self)
Return a list of all received headers in raw format
7.611846
5.414951
1.405709
d = {}
for k, v in self.message.items():
    d[k] = decode_header_part(v)
return d
def headers(self)
Return only the headers as Python object
5.967959
5.011438
1.190867
date = self.message.get('date')
conv = None

try:
    conv, _ = convert_mail_date(date)
finally:
    return conv
def date(self)
Return the mail date in datetime.datetime format and UTC.
9.24051
5.929262
1.558459
date = self.message.get('date')
timezone = 0

try:
    _, timezone = convert_mail_date(date)
finally:
    return timezone
def timezone(self)
Return timezone. Offset from UTC.
10.428252
8.26393
1.2619
if self.date:
    return json.dumps(self.date.isoformat(), ensure_ascii=False)
def date_json(self)
Return the JSON of date
4.09644
3.406396
1.202573
if self.mail.get("date"):
    self._mail["date"] = self.date.isoformat()
return json.dumps(self.mail, ensure_ascii=False, indent=2)
def mail_json(self)
Return the JSON of mail parsed
4.762873
4.199898
1.134045
if self.mail_partial.get("date"):
    self._mail_partial["date"] = self.date.isoformat()
return json.dumps(self.mail_partial, ensure_ascii=False, indent=2)
def mail_partial_json(self)
Return the JSON of mail parsed partial
4.495371
3.912786
1.148893
failed = False
size_failed = False

try:
    scraper.download_image(img_url)
except ImageDownloadError:
    failed = True
except ImageSizeError:
    size_failed = True

status_lock.acquire(True)
if failed:
    status_flags['failed'] += 1
elif size_failed:
    status_flags['under_min_or_over_max_filesize'] += 1
status_flags['percent'] = status_flags['percent'] + old_div(100.0, scraper.no_to_download)
pbar.update(status_flags['percent'] % 100)
status_lock.release()
return True
def download_worker_fn(scraper, img_url, pbar, status_flags, status_lock)
Standalone function that downloads images.
3.197834
3.195086
1.00086
parser = argparse.ArgumentParser(
    description='Downloads images from given URL')
parser.add_argument('url2scrape', nargs=1, help="URL to scrape")
parser.add_argument('-m', '--max-images', type=int, default=None,
                    help="Limit on number of images\n")
parser.add_argument('-s', '--save-dir', type=str, default="images",
                    help="Directory in which images should be saved")
parser.add_argument('-g', '--injected', help="Scrape injected images",
                    action="store_true")
parser.add_argument('--proxy-server', type=str, default=None,
                    help="Proxy server to use")
parser.add_argument('--min-filesize', type=int, default=0,
                    help="Limit on size of image in bytes")
parser.add_argument('--max-filesize', type=int, default=100000000,
                    help="Limit on size of image in bytes")
parser.add_argument('--dump-urls', default=False,
                    help="Print the URLs of the images",
                    action="store_true")
parser.add_argument('--formats', nargs="*", default=None,
                    help="Specify formats in a list without any separator. "
                         "This argument must be after the URL.")
parser.add_argument('--scrape-reverse', default=False,
                    help="Scrape the images in reverse order",
                    action="store_true")
parser.add_argument('--filename-pattern', type=str, default=None,
                    help="Only scrape images with filenames that "
                         "match the given regex pattern")
parser.add_argument('--nthreads', type=int, default=10,
                    help="The number of threads to use when downloading images.")
args = parser.parse_args()

self.url = args.url2scrape[0]
if not re.match(r'^[a-zA-Z]+://', self.url):
    self.url = 'http://' + self.url
self.no_to_download = args.max_images
save_dir = args.save_dir + '_{uri.netloc}'.format(uri=urlparse(self.url))
if args.save_dir != "images":
    save_dir = args.save_dir
self.download_path = os.path.join(os.getcwd(), save_dir)
self.use_ghost = args.injected
self.format_list = args.formats if args.formats else [
    "jpg", "png", "gif", "svg", "jpeg"]
self.min_filesize = args.min_filesize
self.max_filesize = args.max_filesize
self.dump_urls = args.dump_urls
self.proxy_url = args.proxy_server
self.proxies = {}
if self.proxy_url:
    if not re.match(r'^[a-zA-Z]+://', self.proxy_url):
        self.proxy_url = 'http://' + self.proxy_url
    proxy_start_length = self.proxy_url.find("://") + 3
    self.proxies = {
        self.proxy_url[:(proxy_start_length - 3)]: self.proxy_url
    }
self.scrape_reverse = args.scrape_reverse
self.filename_pattern = args.filename_pattern
self.nthreads = args.nthreads
return (self.url, self.no_to_download, self.format_list,
        self.download_path, self.min_filesize, self.max_filesize,
        self.dump_urls, self.scrape_reverse, self.use_ghost,
        self.filename_pattern)
def get_arguments(self)
Gets the arguments from the command line.
2.303256
2.282401
1.009137
if self.use_ghost:
    self.url = urljoin("http://", self.url)
    import selenium
    import selenium.webdriver
    driver = selenium.webdriver.PhantomJS(service_log_path=os.path.devnull)
    driver.get(self.url)
    page_html = driver.page_source
    page_url = driver.current_url
    driver.quit()
else:
    if self.proxy_url:
        print("Using proxy: " + self.proxy_url + "\n")
    try:
        page = requests.get(self.url, proxies=self.proxies)
        if page.status_code != 200:
            raise PageLoadError(page.status_code)
    except requests.exceptions.MissingSchema:
        self.url = "http://" + self.url
        page = requests.get(self.url, proxies=self.proxies)
        if page.status_code != 200:
            raise PageLoadError(page.status_code)
    except requests.exceptions.ConnectionError:
        raise PageLoadError(None)
    try:
        page_html = page.text
        page_url = page.url
    except UnboundLocalError:
        raise PageLoadError(None)

self.page_html = page_html
self.page_url = page_url
return (self.page_html, self.page_url)
def get_html(self)
Downloads HTML content of page given the page_url
1.997298
1.912947
1.044095
tree = html.fromstring(self.page_html)
img = tree.xpath('//img/@src')
links = tree.xpath('//a/@href')
img_list = self.process_links(img)
img_links = self.process_links(links)
img_list.extend(img_links)

if self.filename_pattern:
    # Compile pattern for efficiency
    pattern = re.compile(self.filename_pattern)

    # Verifies filename in the image URL matches pattern
    def matches_pattern(img_url):
        img_filename = urlparse(img_url).path.split('/')[-1]
        return pattern.search(img_filename)

    images = [urljoin(self.url, img_url) for img_url in img_list
              if matches_pattern(img_url)]
else:
    images = [urljoin(self.url, img_url) for img_url in img_list]

images = list(set(images))
self.images = images
if self.scrape_reverse:
    self.images.reverse()
return self.images
def get_img_list(self)
Gets list of images from the page_html.
2.969505
2.770309
1.071904
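The filename-pattern filter above, reduced to a standalone example (Python 3 here; the URL and regex are illustrative):

import re
from urllib.parse import urlparse

# Keep only URLs whose path filename matches the regex; query strings are ignored.
pattern = re.compile(r"\.png$")
url = "http://example.com/images/logo.png?v=2"
filename = urlparse(url).path.split('/')[-1]
print(filename, bool(pattern.search(filename)))  # logo.png True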
if os.path.exists(self.download_path):
    if not os.access(self.download_path, os.W_OK):
        raise DirectoryAccessError
elif os.access(os.path.dirname(self.download_path), os.W_OK):
    os.makedirs(self.download_path)
else:
    raise DirectoryCreateError
return True
def process_download_path(self)
Processes the download path. It checks if the path exists and the scraper has write permissions.
2.302513
2.337601
0.98499
img_request = None
try:
    img_request = requests.request(
        'get', img_url, stream=True, proxies=self.proxies)
    if img_request.status_code != 200:
        raise ImageDownloadError(img_request.status_code)
except:
    raise ImageDownloadError()

if img_url[-3:] == "svg" or (
        int(img_request.headers['content-length']) > self.min_filesize and
        int(img_request.headers['content-length']) < self.max_filesize):
    img_content = img_request.content
    with open(os.path.join(self.download_path,
                           img_url.split('/')[-1]), 'wb') as f:
        byte_image = bytes(img_content)
        f.write(byte_image)
else:
    raise ImageSizeError(img_request.headers['content-length'])
return True
def download_image(self, img_url)
Downloads a single image. Downloads img_url using self.page_url as base. Also, raises the appropriate exception if required.
2.430348
2.446354
0.993457
links_list = []
for link in links:
    if os.path.splitext(link)[1][1:].strip().lower() in self.format_list:
        links_list.append(link)
return links_list
def process_links(self, links)
Function to process the list of links and filter required links.
2.868372
2.711062
1.058025
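The extension test process_links applies to every candidate link, standalone (the link and whitelist are illustrative):

import os

# Extract the extension, drop the dot, lowercase, and compare to the whitelist.
link = "/static/photo.JPEG"
ext = os.path.splitext(link)[1][1:].strip().lower()
print(ext, ext in ["jpg", "png", "gif", "svg", "jpeg"])  # jpeg True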
"Updates the progress bar to a new value." if value <= 0.1: value = 0 assert 0 <= value <= self.maxval self.currval = value if not self._need_update() or self.finished: return if not self.start_time: self.start_time = time.time() self.seconds_elapsed = time.time() - self.start_time self.prev_percentage = self.percentage() if value != self.maxval: self.fd.write(self._format_line() + '\r') else: self.finished = True self.fd.write(self._format_line() + '\n')
def update(self, value)
Updates the progress bar to a new value.
3.399058
3.268019
1.040097
self.update(self.maxval)
if self.signal_set:
    signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def finish(self)
Used to tell the progress is finished.
5.580545
4.236909
1.317126
setproctitle('image-scraper')
scraper = ImageScraper()
scraper.get_arguments()
print("\nImageScraper\n============\nRequesting page....\n")

try:
    scraper.get_html()
except PageLoadError as err:
    if err.status_code is None:
        print("ImageScraper is unable to access the internet.")
    else:
        print("Page failed to load. Status code: {0}".format(err.status_code))
    sys.exit()

scraper.get_img_list()

if len(scraper.images) == 0:
    sys.exit("Sorry, no images found.")
if scraper.no_to_download is None:
    scraper.no_to_download = len(scraper.images)

print("Found {0} images: ".format(len(scraper.images)))

try:
    scraper.process_download_path()
except DirectoryAccessError:
    print("Sorry, the directory can't be accessed.")
    sys.exit()
except DirectoryCreateError:
    print("Sorry, the directory can't be created.")
    sys.exit()

if scraper.dump_urls:
    for img_url in scraper.images:
        print(img_url)

status_flags = {'count': 0, 'percent': 0.0, 'failed': 0,
                'under_min_or_over_max_filesize': 0}
widgets = ['Progress: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
           ' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
pool = ThreadPoolExecutor(max_workers=scraper.nthreads)
status_lock = threading.Lock()

for img_url in scraper.images:
    if status_flags['count'] == scraper.no_to_download:
        break
    pool.submit(download_worker_fn, scraper, img_url, pbar,
                status_flags, status_lock)
    status_flags['count'] += 1

pool.shutdown(wait=True)
pbar.finish()
print("\nDone!\nDownloaded {0} images\nFailed: {1}\n".format(
    status_flags['count'] - status_flags['failed'] -
    status_flags['under_min_or_over_max_filesize'],
    status_flags['failed']))
return
def console_main()
This function handles all the console action.
2.899472
2.874838
1.008569
msg = cls()
for line in raw.splitlines():
    m = cls.sse_line_pattern.match(line)
    if m is None:
        # Malformed line. Discard but warn.
        warnings.warn('Invalid SSE line: "%s"' % line, SyntaxWarning)
        continue

    name = m.group('name')
    if name == '':
        # line began with a ":", so is a comment. Ignore
        continue
    value = m.group('value')

    if name == 'data':
        # If we already have some data, then join to it with a newline.
        # Else this is it.
        if msg.data:
            msg.data = '%s\n%s' % (msg.data, value)
        else:
            msg.data = value
    elif name == 'event':
        msg.event = value
    elif name == 'id':
        msg.id = value
    elif name == 'retry':
        msg.retry = int(value)

return msg
def parse(cls, raw)
Given a possibly-multiline string representing an SSE message, parse it and return a Event object.
3.268371
2.895698
1.128699
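A hedged example of the wire format Event.parse consumes; field semantics follow the SSE spec, and the attribute names assume the class above:

raw = (
    "event: update\n"
    "id: 42\n"
    "retry: 1000\n"
    "data: hello\n"
    "data: world"
)
# Assuming Event.parse as defined above, the parsed message would carry:
#   msg.event == "update", msg.id == "42", msg.retry == 1000,
#   msg.data == "hello\nworld"   (multiple data lines join with a newline)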
repo = Repo.init(".")
if repo.is_dirty(untracked_files=True):
    raise RuntimeError("Repository is dirty, please commit/stash your changes.")

branch_name = f"release-{version}"
print(f"{Fore.CYAN}Create {branch_name} branch from upstream master")
upstream = get_upstream(repo)
upstream.fetch()
release_branch = repo.create_head(branch_name, upstream.refs.master, force=True)
release_branch.checkout()
return repo
def create_branch(version)
Create a fresh branch from upstream/master
3.337045
3.159536
1.056182
for remote in repo.remotes:
    for url in remote.urls:
        if url.endswith("pytest-dev/pluggy.git"):
            return remote
raise RuntimeError("could not find pytest-dev/pluggy.git remote")
def get_upstream(repo: Repo) -> Remote
Find upstream repository for pluggy on the remotes
7.171114
5.506207
1.302369
create_branch(version)
changelog(version, write_out=True)
check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])

print()
print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
def pre_release(version)
Generates new docs, release announcements and creates a local tag.
6.603858
6.543868
1.009167
try:
    next(wrap_controller)  # first yield
except StopIteration:
    _raise_wrapfail(wrap_controller, "did not yield")

call_outcome = _Result.from_call(func)

try:
    wrap_controller.send(call_outcome)
    _raise_wrapfail(wrap_controller, "has second yield")
except StopIteration:
    pass

return call_outcome.get_result()
def _wrapped_call(wrap_controller, func)
Wrap calling to a function with a generator which needs to yield exactly once. The yield point will trigger calling the wrapped function and return its ``_Result`` to the yield point. The generator then needs to finish (raise StopIteration) in order for the wrapped call to complete.
4.539026
4.091223
1.109455
__tracebackhide__ = True
results = []
excinfo = None
try:  # run impl and wrapper setup functions in a loop
    teardowns = []
    try:
        for hook_impl in reversed(hook_impls):
            try:
                args = [caller_kwargs[argname] for argname in hook_impl.argnames]
            except KeyError:
                for argname in hook_impl.argnames:
                    if argname not in caller_kwargs:
                        raise HookCallError(
                            "hook call must provide argument %r" % (argname,)
                        )

            if hook_impl.hookwrapper:
                try:
                    gen = hook_impl.function(*args)
                    next(gen)  # first yield
                    teardowns.append(gen)
                except StopIteration:
                    _raise_wrapfail(gen, "did not yield")
            else:
                res = hook_impl.function(*args)
                if res is not None:
                    results.append(res)
                    if firstresult:  # halt further impl calls
                        break
    except BaseException:
        excinfo = sys.exc_info()
finally:
    if firstresult:  # first result hooks return a single value
        outcome = _Result(results[0] if results else None, excinfo)
    else:
        outcome = _Result(results, excinfo)

    # run all wrapper post-yield blocks
    for gen in reversed(teardowns):
        try:
            gen.send(outcome)
            _raise_wrapfail(gen, "has second yield")
        except StopIteration:
            pass

    return outcome.get_result()
def _multicall(hook_impls, caller_kwargs, firstresult=False)
Execute a call into multiple python functions/methods and return the result(s). ``caller_kwargs`` comes from _HookCaller.__call__().
3.692713
3.853063
0.958384
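_multicall is the engine behind pluggy hook calls; a minimal end-to-end example of the public API that exercises it (standard pluggy usage; the project and hook names are ours):

import pluggy

hookspec = pluggy.HookspecMarker("myproject")
hookimpl = pluggy.HookimplMarker("myproject")

class MySpec:
    @hookspec
    def myhook(self, arg):
        """A hook taking one argument."""

class Plugin1:
    @hookimpl
    def myhook(self, arg):
        return arg + 1

pm = pluggy.PluginManager("myproject")
pm.add_hookspecs(MySpec)
pm.register(Plugin1())
print(pm.hook.myhook(arg=41))  # [42] -- _multicall collected one result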
msg = "Use get_result() which forces correct exception handling"
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return self._result
def result(self)
Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).
12.670308
7.635401
1.659416
__tracebackhide__ = True
if self._excinfo is None:
    return self._result
else:
    ex = self._excinfo
    if _py3:
        raise ex[1].with_traceback(ex[2])
    _reraise(*ex)
def get_result(self)
Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results.
4.688197
5.0021
0.937246
plugin_name = name or self.get_canonical_name(plugin)

if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
    if self._name2plugin.get(plugin_name, -1) is None:
        return  # blocked plugin, return None to indicate no registration
    raise ValueError(
        "Plugin already registered: %s=%s\n%s"
        % (plugin_name, plugin, self._name2plugin)
    )

# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin

# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
    hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
    if hookimpl_opts is not None:
        normalize_hookimpl_opts(hookimpl_opts)
        method = getattr(plugin, name)
        hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
        hook = getattr(self.hook, name, None)
        if hook is None:
            hook = _HookCaller(name, self._hookexec)
            setattr(self.hook, name, hook)
        elif hook.has_spec():
            self._verify_hook(hook, hookimpl)
            hook._maybe_apply_history(hookimpl)
        hook._add_hookimpl(hookimpl)
        hookcallers.append(hook)
return plugin_name
def register(self, plugin, name=None)
Register a plugin and return its canonical name or None if the name is blocked from registering. Raise a ValueError if the plugin is already registered.
4.23232
3.989143
1.06096
if name is None:
    assert plugin is not None, "one of name or plugin needs to be specified"
    name = self.get_name(plugin)

if plugin is None:
    plugin = self.get_plugin(name)

# if self._name2plugin[name] == None registration was blocked: ignore
if self._name2plugin.get(name):
    del self._name2plugin[name]

for hookcaller in self._plugin2hookcallers.pop(plugin, []):
    hookcaller._remove_plugin(plugin)

return plugin
def unregister(self, plugin=None, name=None)
unregister a plugin object and all its contained hook implementations from internal data structures.
4.139214
3.830902
1.08048
self.unregister(name=name)
self._name2plugin[name] = None
def set_blocked(self, name)
block registrations of the given name, unregister if already registered.
11.808104
8.341682
1.415554
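Continuing the pluggy example further up, blocking a name is a one-liner; is_blocked is the matching query on PluginManager (the plugin name here is illustrative):

# Block any future registration under this plugin name and unregister
# an existing one if present.
pm.set_blocked("plugin-1")
print(pm.is_blocked("plugin-1"))  # True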