Dataset columns:

    Column                      Type     Values / lengths
    Unnamed: 0                  int64    0 - 10k
    repository_name             string   lengths 7 - 54
    func_path_in_repository     string   lengths 5 - 223
    func_name                   string   lengths 1 - 134
    whole_func_string           string   lengths 100 - 30.3k
    language                    string   1 distinct value ("python")
    func_code_string            string   lengths 100 - 30.3k
    func_code_tokens            string   lengths 138 - 33.2k
    func_documentation_string   string   lengths 1 - 15k
    func_documentation_tokens   string   lengths 5 - 5.14k
    split_name                  string   1 distinct value ("train")
    func_code_url               string   lengths 91 - 315
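Each record below lists its fields in the same order as the schema above: row index, repository_name, func_path_in_repository, func_name, whole_func_string, language, func_code_string, func_code_tokens, func_documentation_string, func_documentation_tokens, split_name, func_code_url. The snippet below is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset identifier is a placeholder, since this excerpt does not state the real one.

# Minimal sketch: load and inspect a dataset with the schema above.
# "some-user/python-code-search" is a placeholder identifier, not the
# actual dataset name, which this excerpt does not state.
from datasets import load_dataset

ds = load_dataset("some-user/python-code-search", split="train")
print(ds.column_names)                            # should match the columns above
record = ds[0]
print(record["repository_name"], record["func_name"])
print(record["func_documentation_string"][:80])   # start of the docstring
print(record["func_code_url"])                    # link back to the source lines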
5,300
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
RewardRecipient.is_all_field_none
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._status is not None: return False if self._sub_status is not None: return False if self._type_ is not None: return False if self._counterparty_alias is not None: return False if self._amount_reward is not None: return False return True
python
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._status is not None: return False if self._sub_status is not None: return False if self._type_ is not None: return False if self._counterparty_alias is not None: return False if self._amount_reward is not None: return False return True
['def', 'is_all_field_none', '(', 'self', ')', ':', 'if', 'self', '.', '_id_', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_created', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_updated', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_status', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_sub_status', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_type_', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_counterparty_alias', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_amount_reward', 'is', 'not', 'None', ':', 'return', 'False', 'return', 'True']
:rtype: bool
[':', 'rtype', ':', 'bool']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L13934-L13963
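(Aside on the token columns: func_code_tokens and func_documentation_tokens are lexed versions of func_code_string and func_documentation_string. The sketch below approximates the code tokenization with Python's standard tokenize module; the tokenizer actually used for this dataset clearly differs in details, for example it keeps comments as single tokens and drops the docstring, so this is illustrative only.)

# Rough approximation of func_code_tokens from func_code_string.
# The dataset's own tokenizer differs in details (e.g. docstring handling),
# so the output will not match the stored column exactly.
import io
import tokenize

def rough_code_tokens(source):
    # Drop purely structural tokens; keep everything else as plain strings.
    skip = {tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
            tokenize.DEDENT, tokenize.ENDMARKER}
    tokens = tokenize.generate_tokens(io.StringIO(source).readline)
    return [tok.string for tok in tokens if tok.type not in skip]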
5,301
StanfordVL/robosuite
robosuite/environments/base.py
MujocoEnv.reset_from_xml_string
def reset_from_xml_string(self, xml_string): """Reloads the environment from an XML description of the environment.""" # if there is an active viewer window, destroy it self.close() # load model from xml self.mjpy_model = load_model_from_xml(xml_string) self.sim = MjSim(self.mjpy_model) self.initialize_time(self.control_freq) if self.has_renderer and self.viewer is None: self.viewer = MujocoPyRenderer(self.sim) self.viewer.viewer.vopt.geomgroup[0] = ( 1 if self.render_collision_mesh else 0 ) self.viewer.viewer.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 # hiding the overlay speeds up rendering significantly self.viewer.viewer._hide_overlay = True elif self.has_offscreen_renderer: render_context = MjRenderContextOffscreen(self.sim) render_context.vopt.geomgroup[0] = 1 if self.render_collision_mesh else 0 render_context.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 self.sim.add_render_context(render_context) self.sim_state_initial = self.sim.get_state() self._get_reference() self.cur_time = 0 self.timestep = 0 self.done = False # necessary to refresh MjData self.sim.forward()
python
def reset_from_xml_string(self, xml_string): """Reloads the environment from an XML description of the environment.""" # if there is an active viewer window, destroy it self.close() # load model from xml self.mjpy_model = load_model_from_xml(xml_string) self.sim = MjSim(self.mjpy_model) self.initialize_time(self.control_freq) if self.has_renderer and self.viewer is None: self.viewer = MujocoPyRenderer(self.sim) self.viewer.viewer.vopt.geomgroup[0] = ( 1 if self.render_collision_mesh else 0 ) self.viewer.viewer.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 # hiding the overlay speeds up rendering significantly self.viewer.viewer._hide_overlay = True elif self.has_offscreen_renderer: render_context = MjRenderContextOffscreen(self.sim) render_context.vopt.geomgroup[0] = 1 if self.render_collision_mesh else 0 render_context.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 self.sim.add_render_context(render_context) self.sim_state_initial = self.sim.get_state() self._get_reference() self.cur_time = 0 self.timestep = 0 self.done = False # necessary to refresh MjData self.sim.forward()
['def', 'reset_from_xml_string', '(', 'self', ',', 'xml_string', ')', ':', '# if there is an active viewer window, destroy it', 'self', '.', 'close', '(', ')', '# load model from xml', 'self', '.', 'mjpy_model', '=', 'load_model_from_xml', '(', 'xml_string', ')', 'self', '.', 'sim', '=', 'MjSim', '(', 'self', '.', 'mjpy_model', ')', 'self', '.', 'initialize_time', '(', 'self', '.', 'control_freq', ')', 'if', 'self', '.', 'has_renderer', 'and', 'self', '.', 'viewer', 'is', 'None', ':', 'self', '.', 'viewer', '=', 'MujocoPyRenderer', '(', 'self', '.', 'sim', ')', 'self', '.', 'viewer', '.', 'viewer', '.', 'vopt', '.', 'geomgroup', '[', '0', ']', '=', '(', '1', 'if', 'self', '.', 'render_collision_mesh', 'else', '0', ')', 'self', '.', 'viewer', '.', 'viewer', '.', 'vopt', '.', 'geomgroup', '[', '1', ']', '=', '1', 'if', 'self', '.', 'render_visual_mesh', 'else', '0', '# hiding the overlay speeds up rendering significantly', 'self', '.', 'viewer', '.', 'viewer', '.', '_hide_overlay', '=', 'True', 'elif', 'self', '.', 'has_offscreen_renderer', ':', 'render_context', '=', 'MjRenderContextOffscreen', '(', 'self', '.', 'sim', ')', 'render_context', '.', 'vopt', '.', 'geomgroup', '[', '0', ']', '=', '1', 'if', 'self', '.', 'render_collision_mesh', 'else', '0', 'render_context', '.', 'vopt', '.', 'geomgroup', '[', '1', ']', '=', '1', 'if', 'self', '.', 'render_visual_mesh', 'else', '0', 'self', '.', 'sim', '.', 'add_render_context', '(', 'render_context', ')', 'self', '.', 'sim_state_initial', '=', 'self', '.', 'sim', '.', 'get_state', '(', ')', 'self', '.', '_get_reference', '(', ')', 'self', '.', 'cur_time', '=', '0', 'self', '.', 'timestep', '=', '0', 'self', '.', 'done', '=', 'False', '# necessary to refresh MjData', 'self', '.', 'sim', '.', 'forward', '(', ')']
Reloads the environment from an XML description of the environment.
['Reloads', 'the', 'environment', 'from', 'an', 'XML', 'description', 'of', 'the', 'environment', '.']
train
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/base.py#L254-L288
5,302
materialsproject/pymatgen
pymatgen/io/abinit/launcher.py
BatchLauncher.submit
def submit(self, **kwargs): """ Submit a job script that will run the schedulers with `abirun.py`. Args: verbose: Verbosity level dry_run: Don't submit the script if dry_run. Default: False Returns: namedtuple with attributes: retcode: Return code as returned by the submission script. qjob: :class:`QueueJob` object. num_flows_inbatch: Number of flows executed by the batch script Return code of the job script submission. """ verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False) if not self.flows: print("Cannot submit an empty list of flows!") return 0 if hasattr(self, "qjob"): # This usually happens when we have loaded the object from pickle # and we have already submitted to batch script to the queue. # At this point we need to understand if the previous batch job # is still running before trying to submit it again. There are three cases: # # 1) The batch script has completed withing timelimit and therefore # the pid_file has been removed by the script. In this case, we # should not try to submit it again. # 2) The batch script has been killed due to timelimit (other reasons are possible # but we neglect them). In this case the pid_file exists but there's no job with # this pid runnig and we can resubmit it again. # 3) The batch script is still running. print("BatchLauncher has qjob %s" % self.qjob) if not self.batch_pid_file.exists: print("It seems that the batch script reached the end. Wont' try to submit it again") return 0 msg = ("Here I have to understand if qjob is in the queue." " but I need an abstract API that can retrieve info from the queue id") raise RuntimeError(msg) # TODO: Temptative API if self.qjob.in_status("Running|Queued"): print("Job is still running. Cannot submit") else: del self.qjob script, num_flows_inbatch = self._get_script_nflows() if num_flows_inbatch == 0: print("All flows have reached all_ok! Batch script won't be submitted") return 0 if verbose: print("*** submission script ***") print(script) # Write the script. self.script_file.write(script) self.script_file.chmod(0o740) # Builf the flow. for flow in self.flows: flow.build_and_pickle_dump() # Submit the task and save the queue id. if dry_run: return -1 print("Will submit %s flows in batch script" % len(self.flows)) self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path) # Save the queue id in the pid file # The file will be removed by the job script if execution is completed. self.batch_pidfile.write(str(self.qjob.qid)) self.pickle_dump() process.wait() return dict2namedtuple(retcode=process.returncode, qjob=self.qjob, num_flows_inbatch=num_flows_inbatch)
python
def submit(self, **kwargs): """ Submit a job script that will run the schedulers with `abirun.py`. Args: verbose: Verbosity level dry_run: Don't submit the script if dry_run. Default: False Returns: namedtuple with attributes: retcode: Return code as returned by the submission script. qjob: :class:`QueueJob` object. num_flows_inbatch: Number of flows executed by the batch script Return code of the job script submission. """ verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False) if not self.flows: print("Cannot submit an empty list of flows!") return 0 if hasattr(self, "qjob"): # This usually happens when we have loaded the object from pickle # and we have already submitted to batch script to the queue. # At this point we need to understand if the previous batch job # is still running before trying to submit it again. There are three cases: # # 1) The batch script has completed withing timelimit and therefore # the pid_file has been removed by the script. In this case, we # should not try to submit it again. # 2) The batch script has been killed due to timelimit (other reasons are possible # but we neglect them). In this case the pid_file exists but there's no job with # this pid runnig and we can resubmit it again. # 3) The batch script is still running. print("BatchLauncher has qjob %s" % self.qjob) if not self.batch_pid_file.exists: print("It seems that the batch script reached the end. Wont' try to submit it again") return 0 msg = ("Here I have to understand if qjob is in the queue." " but I need an abstract API that can retrieve info from the queue id") raise RuntimeError(msg) # TODO: Temptative API if self.qjob.in_status("Running|Queued"): print("Job is still running. Cannot submit") else: del self.qjob script, num_flows_inbatch = self._get_script_nflows() if num_flows_inbatch == 0: print("All flows have reached all_ok! Batch script won't be submitted") return 0 if verbose: print("*** submission script ***") print(script) # Write the script. self.script_file.write(script) self.script_file.chmod(0o740) # Builf the flow. for flow in self.flows: flow.build_and_pickle_dump() # Submit the task and save the queue id. if dry_run: return -1 print("Will submit %s flows in batch script" % len(self.flows)) self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path) # Save the queue id in the pid file # The file will be removed by the job script if execution is completed. self.batch_pidfile.write(str(self.qjob.qid)) self.pickle_dump() process.wait() return dict2namedtuple(retcode=process.returncode, qjob=self.qjob, num_flows_inbatch=num_flows_inbatch)
['def', 'submit', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'verbose', ',', 'dry_run', '=', 'kwargs', '.', 'pop', '(', '"verbose"', ',', '0', ')', ',', 'kwargs', '.', 'pop', '(', '"dry_run"', ',', 'False', ')', 'if', 'not', 'self', '.', 'flows', ':', 'print', '(', '"Cannot submit an empty list of flows!"', ')', 'return', '0', 'if', 'hasattr', '(', 'self', ',', '"qjob"', ')', ':', '# This usually happens when we have loaded the object from pickle', '# and we have already submitted to batch script to the queue.', '# At this point we need to understand if the previous batch job', '# is still running before trying to submit it again. There are three cases:', '#', '# 1) The batch script has completed withing timelimit and therefore', '# the pid_file has been removed by the script. In this case, we', '# should not try to submit it again.', '# 2) The batch script has been killed due to timelimit (other reasons are possible', "# but we neglect them). In this case the pid_file exists but there's no job with", '# this pid runnig and we can resubmit it again.', '# 3) The batch script is still running.', 'print', '(', '"BatchLauncher has qjob %s"', '%', 'self', '.', 'qjob', ')', 'if', 'not', 'self', '.', 'batch_pid_file', '.', 'exists', ':', 'print', '(', '"It seems that the batch script reached the end. Wont\' try to submit it again"', ')', 'return', '0', 'msg', '=', '(', '"Here I have to understand if qjob is in the queue."', '" but I need an abstract API that can retrieve info from the queue id"', ')', 'raise', 'RuntimeError', '(', 'msg', ')', '# TODO: Temptative API', 'if', 'self', '.', 'qjob', '.', 'in_status', '(', '"Running|Queued"', ')', ':', 'print', '(', '"Job is still running. Cannot submit"', ')', 'else', ':', 'del', 'self', '.', 'qjob', 'script', ',', 'num_flows_inbatch', '=', 'self', '.', '_get_script_nflows', '(', ')', 'if', 'num_flows_inbatch', '==', '0', ':', 'print', '(', '"All flows have reached all_ok! Batch script won\'t be submitted"', ')', 'return', '0', 'if', 'verbose', ':', 'print', '(', '"*** submission script ***"', ')', 'print', '(', 'script', ')', '# Write the script.', 'self', '.', 'script_file', '.', 'write', '(', 'script', ')', 'self', '.', 'script_file', '.', 'chmod', '(', '0o740', ')', '# Builf the flow.', 'for', 'flow', 'in', 'self', '.', 'flows', ':', 'flow', '.', 'build_and_pickle_dump', '(', ')', '# Submit the task and save the queue id.', 'if', 'dry_run', ':', 'return', '-', '1', 'print', '(', '"Will submit %s flows in batch script"', '%', 'len', '(', 'self', '.', 'flows', ')', ')', 'self', '.', 'qjob', ',', 'process', '=', 'self', '.', 'qadapter', '.', 'submit_to_queue', '(', 'self', '.', 'script_file', '.', 'path', ')', '# Save the queue id in the pid file', '# The file will be removed by the job script if execution is completed.', 'self', '.', 'batch_pidfile', '.', 'write', '(', 'str', '(', 'self', '.', 'qjob', '.', 'qid', ')', ')', 'self', '.', 'pickle_dump', '(', ')', 'process', '.', 'wait', '(', ')', 'return', 'dict2namedtuple', '(', 'retcode', '=', 'process', '.', 'returncode', ',', 'qjob', '=', 'self', '.', 'qjob', ',', 'num_flows_inbatch', '=', 'num_flows_inbatch', ')']
Submit a job script that will run the schedulers with `abirun.py`. Args: verbose: Verbosity level dry_run: Don't submit the script if dry_run. Default: False Returns: namedtuple with attributes: retcode: Return code as returned by the submission script. qjob: :class:`QueueJob` object. num_flows_inbatch: Number of flows executed by the batch script Return code of the job script submission.
['Submit', 'a', 'job', 'script', 'that', 'will', 'run', 'the', 'schedulers', 'with', 'abirun', '.', 'py', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/launcher.py#L1167-L1252
5,303
takaakiaoki/ofblockmeshdicthelper
ofblockmeshdicthelper/__init__.py
BlockMeshDict.merge_vertices
def merge_vertices(self): """call reduce_vertex on all vertices with identical values.""" # groupby expects sorted data sorted_vertices = sorted(list(self.vertices.items()), key=lambda v: hash(v[1])) groups = [] for k, g in groupby(sorted_vertices, lambda v: hash(v[1])): groups.append(list(g)) for group in groups: if len(group) == 1: continue names = [v[0] for v in group] self.reduce_vertex(*names)
python
def merge_vertices(self): """call reduce_vertex on all vertices with identical values.""" # groupby expects sorted data sorted_vertices = sorted(list(self.vertices.items()), key=lambda v: hash(v[1])) groups = [] for k, g in groupby(sorted_vertices, lambda v: hash(v[1])): groups.append(list(g)) for group in groups: if len(group) == 1: continue names = [v[0] for v in group] self.reduce_vertex(*names)
['def', 'merge_vertices', '(', 'self', ')', ':', '# groupby expects sorted data', 'sorted_vertices', '=', 'sorted', '(', 'list', '(', 'self', '.', 'vertices', '.', 'items', '(', ')', ')', ',', 'key', '=', 'lambda', 'v', ':', 'hash', '(', 'v', '[', '1', ']', ')', ')', 'groups', '=', '[', ']', 'for', 'k', ',', 'g', 'in', 'groupby', '(', 'sorted_vertices', ',', 'lambda', 'v', ':', 'hash', '(', 'v', '[', '1', ']', ')', ')', ':', 'groups', '.', 'append', '(', 'list', '(', 'g', ')', ')', 'for', 'group', 'in', 'groups', ':', 'if', 'len', '(', 'group', ')', '==', '1', ':', 'continue', 'names', '=', '[', 'v', '[', '0', ']', 'for', 'v', 'in', 'group', ']', 'self', '.', 'reduce_vertex', '(', '*', 'names', ')']
call reduce_vertex on all vertices with identical values.
['call', 'reduce_vertex', 'on', 'all', 'vertices', 'with', 'identical', 'values', '.']
train
https://github.com/takaakiaoki/ofblockmeshdicthelper/blob/df99e6b0e4f0334c9afe075b4f3ceaccb5bac9fd/ofblockmeshdicthelper/__init__.py#L384-L396
5,304
tradenity/python-sdk
tradenity/resources/brand.py
Brand.list_all_brands
def list_all_brands(cls, **kwargs): """List Brands Return a list of Brands This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_brands(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Brand] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_brands_with_http_info(**kwargs) else: (data) = cls._list_all_brands_with_http_info(**kwargs) return data
python
def list_all_brands(cls, **kwargs): """List Brands Return a list of Brands This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_brands(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Brand] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_brands_with_http_info(**kwargs) else: (data) = cls._list_all_brands_with_http_info(**kwargs) return data
['def', 'list_all_brands', '(', 'cls', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async'", ')', ':', 'return', 'cls', '.', '_list_all_brands_with_http_info', '(', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'cls', '.', '_list_all_brands_with_http_info', '(', '*', '*', 'kwargs', ')', 'return', 'data']
List Brands Return a list of Brands This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_brands(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Brand] If the method is called asynchronously, returns the request thread.
['List', 'Brands']
train
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/brand.py#L599-L621
5,305
bitcraft/PyTMX
pytmx/pytmx.py
TiledImageLayer.parse_xml
def parse_xml(self, node): """ Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self """ self._set_properties(node) self.name = node.get('name', None) self.opacity = node.get('opacity', self.opacity) self.visible = node.get('visible', self.visible) image_node = node.find('image') self.source = image_node.get('source', None) self.trans = image_node.get('trans', None) return self
python
def parse_xml(self, node): """ Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self """ self._set_properties(node) self.name = node.get('name', None) self.opacity = node.get('opacity', self.opacity) self.visible = node.get('visible', self.visible) image_node = node.find('image') self.source = image_node.get('source', None) self.trans = image_node.get('trans', None) return self
['def', 'parse_xml', '(', 'self', ',', 'node', ')', ':', 'self', '.', '_set_properties', '(', 'node', ')', 'self', '.', 'name', '=', 'node', '.', 'get', '(', "'name'", ',', 'None', ')', 'self', '.', 'opacity', '=', 'node', '.', 'get', '(', "'opacity'", ',', 'self', '.', 'opacity', ')', 'self', '.', 'visible', '=', 'node', '.', 'get', '(', "'visible'", ',', 'self', '.', 'visible', ')', 'image_node', '=', 'node', '.', 'find', '(', "'image'", ')', 'self', '.', 'source', '=', 'image_node', '.', 'get', '(', "'source'", ',', 'None', ')', 'self', '.', 'trans', '=', 'image_node', '.', 'get', '(', "'trans'", ',', 'None', ')', 'return', 'self']
Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self
['Parse', 'an', 'Image', 'Layer', 'from', 'ElementTree', 'xml', 'node']
train
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1216-L1229
5,306
mlperf/training
reinforcement/tensorflow/minigo/gtp_cmd_handlers.py
MiniguiBasicCmdHandler._minigui_report_search_status
def _minigui_report_search_status(self, leaves): """Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_search(). """ root = self._player.get_root() msg = { "id": hex(id(root)), "n": int(root.N), "q": float(root.Q), } msg["childQ"] = [int(round(q * 1000)) for q in root.child_Q] msg["childN"] = [int(n) for n in root.child_N] ranked_children = root.rank_children() variations = {} for i in ranked_children[:15]: if root.child_N[i] == 0 or i not in root.children: break c = coords.to_gtp(coords.from_flat(i)) child = root.children[i] nodes = child.most_visited_path_nodes() moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes] variations[c] = { "n": int(root.child_N[i]), "q": float(root.child_Q[i]), "moves": [c] + moves, } if leaves: path = [] leaf = leaves[0] while leaf != root: path.append(leaf.fmove) leaf = leaf.parent if path: path.reverse() variations["live"] = { "n": int(root.child_N[path[0]]), "q": float(root.child_Q[path[0]]), "moves": [coords.to_gtp(coords.from_flat(m)) for m in path] } if variations: msg["variations"] = variations dbg("mg-update:%s" % json.dumps(msg, sort_keys=True))
python
def _minigui_report_search_status(self, leaves): """Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_search(). """ root = self._player.get_root() msg = { "id": hex(id(root)), "n": int(root.N), "q": float(root.Q), } msg["childQ"] = [int(round(q * 1000)) for q in root.child_Q] msg["childN"] = [int(n) for n in root.child_N] ranked_children = root.rank_children() variations = {} for i in ranked_children[:15]: if root.child_N[i] == 0 or i not in root.children: break c = coords.to_gtp(coords.from_flat(i)) child = root.children[i] nodes = child.most_visited_path_nodes() moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes] variations[c] = { "n": int(root.child_N[i]), "q": float(root.child_Q[i]), "moves": [c] + moves, } if leaves: path = [] leaf = leaves[0] while leaf != root: path.append(leaf.fmove) leaf = leaf.parent if path: path.reverse() variations["live"] = { "n": int(root.child_N[path[0]]), "q": float(root.child_Q[path[0]]), "moves": [coords.to_gtp(coords.from_flat(m)) for m in path] } if variations: msg["variations"] = variations dbg("mg-update:%s" % json.dumps(msg, sort_keys=True))
['def', '_minigui_report_search_status', '(', 'self', ',', 'leaves', ')', ':', 'root', '=', 'self', '.', '_player', '.', 'get_root', '(', ')', 'msg', '=', '{', '"id"', ':', 'hex', '(', 'id', '(', 'root', ')', ')', ',', '"n"', ':', 'int', '(', 'root', '.', 'N', ')', ',', '"q"', ':', 'float', '(', 'root', '.', 'Q', ')', ',', '}', 'msg', '[', '"childQ"', ']', '=', '[', 'int', '(', 'round', '(', 'q', '*', '1000', ')', ')', 'for', 'q', 'in', 'root', '.', 'child_Q', ']', 'msg', '[', '"childN"', ']', '=', '[', 'int', '(', 'n', ')', 'for', 'n', 'in', 'root', '.', 'child_N', ']', 'ranked_children', '=', 'root', '.', 'rank_children', '(', ')', 'variations', '=', '{', '}', 'for', 'i', 'in', 'ranked_children', '[', ':', '15', ']', ':', 'if', 'root', '.', 'child_N', '[', 'i', ']', '==', '0', 'or', 'i', 'not', 'in', 'root', '.', 'children', ':', 'break', 'c', '=', 'coords', '.', 'to_gtp', '(', 'coords', '.', 'from_flat', '(', 'i', ')', ')', 'child', '=', 'root', '.', 'children', '[', 'i', ']', 'nodes', '=', 'child', '.', 'most_visited_path_nodes', '(', ')', 'moves', '=', '[', 'coords', '.', 'to_gtp', '(', 'coords', '.', 'from_flat', '(', 'm', '.', 'fmove', ')', ')', 'for', 'm', 'in', 'nodes', ']', 'variations', '[', 'c', ']', '=', '{', '"n"', ':', 'int', '(', 'root', '.', 'child_N', '[', 'i', ']', ')', ',', '"q"', ':', 'float', '(', 'root', '.', 'child_Q', '[', 'i', ']', ')', ',', '"moves"', ':', '[', 'c', ']', '+', 'moves', ',', '}', 'if', 'leaves', ':', 'path', '=', '[', ']', 'leaf', '=', 'leaves', '[', '0', ']', 'while', 'leaf', '!=', 'root', ':', 'path', '.', 'append', '(', 'leaf', '.', 'fmove', ')', 'leaf', '=', 'leaf', '.', 'parent', 'if', 'path', ':', 'path', '.', 'reverse', '(', ')', 'variations', '[', '"live"', ']', '=', '{', '"n"', ':', 'int', '(', 'root', '.', 'child_N', '[', 'path', '[', '0', ']', ']', ')', ',', '"q"', ':', 'float', '(', 'root', '.', 'child_Q', '[', 'path', '[', '0', ']', ']', ')', ',', '"moves"', ':', '[', 'coords', '.', 'to_gtp', '(', 'coords', '.', 'from_flat', '(', 'm', ')', ')', 'for', 'm', 'in', 'path', ']', '}', 'if', 'variations', ':', 'msg', '[', '"variations"', ']', '=', 'variations', 'dbg', '(', '"mg-update:%s"', '%', 'json', '.', 'dumps', '(', 'msg', ',', 'sort_keys', '=', 'True', ')', ')']
Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_search().
['Prints', 'the', 'current', 'MCTS', 'search', 'status', 'to', 'stderr', '.']
train
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/gtp_cmd_handlers.py#L315-L369
5,307
Fantomas42/mots-vides
mots_vides/factory.py
StopWordFactory.get_collection_filename
def get_collection_filename(self, language): """ Returns the filename containing the stop words collection for a specific language. """ filename = os.path.join(self.data_directory, '%s.txt' % language) return filename
python
def get_collection_filename(self, language): """ Returns the filename containing the stop words collection for a specific language. """ filename = os.path.join(self.data_directory, '%s.txt' % language) return filename
['def', 'get_collection_filename', '(', 'self', ',', 'language', ')', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'data_directory', ',', "'%s.txt'", '%', 'language', ')', 'return', 'filename']
Returns the filename containing the stop words collection for a specific language.
['Returns', 'the', 'filename', 'containing', 'the', 'stop', 'words', 'collection', 'for', 'a', 'specific', 'language', '.']
train
https://github.com/Fantomas42/mots-vides/blob/eaeccf73bdb415d0c5559ccd74de360b37a2bbac/mots_vides/factory.py#L94-L100
5,308
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
_check_running_services
def _check_running_services(services): """Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks """ services_running = [service_running(s) for s in services] return list(zip(services, services_running)), services_running
python
def _check_running_services(services): """Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks """ services_running = [service_running(s) for s in services] return list(zip(services, services_running)), services_running
['def', '_check_running_services', '(', 'services', ')', ':', 'services_running', '=', '[', 'service_running', '(', 's', ')', 'for', 's', 'in', 'services', ']', 'return', 'list', '(', 'zip', '(', 'services', ',', 'services_running', ')', ')', ',', 'services_running']
Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks
['Check', 'that', 'the', 'services', 'dict', 'provided', 'is', 'actually', 'running', 'and', 'provide', 'a', 'list', 'of', '(', 'service', 'boolean', ')', 'tuples', 'for', 'each', 'service', '.']
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1077-L1090
5,309
collectiveacuity/jsonModel
jsonmodel/_extensions.py
tabulate
def tabulate(self, format='html', syntax=''): ''' a function to create a table from the class model keyMap :param format: string with format for table output :param syntax: [optional] string with linguistic syntax :return: string with table ''' from tabulate import tabulate as _tabulate # define headers headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description'] rows = [] default_values = False additional_conditions = False field_description = False # construct rows for key, value in self.keyMap.items(): key_segments = _segment_path(key) if key_segments: row = [] # add field column field_name = '' if len(key_segments) > 1: for i in range(1,len(key_segments)): field_name += '&nbsp;&nbsp;&nbsp;&nbsp;' if key_segments[-1] == '0': field_name += '<i>item</i>' else: field_name += key_segments[-1] row.append(field_name) # add datatype column value_datatype = value['value_datatype'] if 'integer_data' in value.keys(): if value['integer_data'] and syntax != 'javascript': value_datatype = 'integer' elif value['value_datatype'] == 'map': if syntax == 'javascript': value_datatype = 'object' elif value['value_datatype'] == 'list': if syntax == 'javascript': value_datatype = 'array' # retrieve datatype of item in list item_key = key + '[0]' item_datatype = self.keyMap[item_key]['value_datatype'] if syntax == 'javascript': if item_datatype == 'list': item_datatype = 'array' elif item_datatype == 'map': item_datatype = 'object' elif 'integer_data' in self.keyMap[item_key].keys(): if self.keyMap[item_key]['integer_data']: item_datatype = 'integer' value_datatype += ' of %ss' % item_datatype row.append(value_datatype) # add required column if value['required_field']: row.append('yes') else: row.append('') # add default column if 'default_value' in value.keys(): default_values = True if isinstance(value['default_value'], str): row.append('"%s"' % value['default_value']) elif isinstance(value['default_value'], bool): row.append(str(value['default_value']).lower()) else: row.append(str(value['default_value'])) else: row.append('') # define recursive example constructor def determine_example(k, v): example_value = '' if 'example_values' in v.keys(): for i in v['example_values']: if example_value: example_value += ', ' if isinstance(i, str): example_value += '"%s"' % i else: example_value += value elif 'declared_value' in v.keys(): if isinstance(v['declared_value'], str): example_value = '"%s"' % v['declared_value'] elif isinstance(v['declared_value'], bool): example_value = str(v['declared_value']).lower() else: example_value = v['declared_value'] else: if v['value_datatype'] == 'map': example_value = '{...}' elif v['value_datatype'] == 'list': example_value = '[...]' elif v['value_datatype'] == 'null': example_value = 'null' return example_value # add examples column row.append(determine_example(key, value)) # add additional conditions conditions = '' description = '' for k, v in value.items(): extra_integer = False if k == 'integer_data' and syntax == 'javascript': extra_integer = True if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer: add_extra = False if k == 'extra_fields': if v: add_extra = True if k in ('field_description', 'field_title'): field_description = True if k == 'field_description': description = v elif not description: description = v elif k != 'extra_fields' or add_extra: additional_conditions = True if conditions: conditions += '<br>' condition_value = v if isinstance(v, str): condition_value = '"%s"' % v elif isinstance(v, bool): condition_value = str(v).lower() conditions += '%s: %s' % (k, condition_value) row.append(conditions) row.append(description) # add row to rows rows.append(row) # add rows for top field top_dict = self.keyMap['.'] if top_dict['extra_fields']: rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', '']) if 'max_bytes' in top_dict.keys(): rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', '']) # eliminate unused columns if not field_description: headers.pop() if not additional_conditions: headers.pop() if not default_values: headers.pop(3) for row in rows: if not field_description: row.pop() if not additional_conditions: row.pop() if not default_values: row.pop(3) # construct table html table_html = _tabulate(rows, headers, tablefmt='html') # add links to urls in text # markdown_url = re.compile('\[(.*?)\]\((.*)\)') table_html = _add_links(table_html) return table_html
python
def tabulate(self, format='html', syntax=''): ''' a function to create a table from the class model keyMap :param format: string with format for table output :param syntax: [optional] string with linguistic syntax :return: string with table ''' from tabulate import tabulate as _tabulate # define headers headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description'] rows = [] default_values = False additional_conditions = False field_description = False # construct rows for key, value in self.keyMap.items(): key_segments = _segment_path(key) if key_segments: row = [] # add field column field_name = '' if len(key_segments) > 1: for i in range(1,len(key_segments)): field_name += '&nbsp;&nbsp;&nbsp;&nbsp;' if key_segments[-1] == '0': field_name += '<i>item</i>' else: field_name += key_segments[-1] row.append(field_name) # add datatype column value_datatype = value['value_datatype'] if 'integer_data' in value.keys(): if value['integer_data'] and syntax != 'javascript': value_datatype = 'integer' elif value['value_datatype'] == 'map': if syntax == 'javascript': value_datatype = 'object' elif value['value_datatype'] == 'list': if syntax == 'javascript': value_datatype = 'array' # retrieve datatype of item in list item_key = key + '[0]' item_datatype = self.keyMap[item_key]['value_datatype'] if syntax == 'javascript': if item_datatype == 'list': item_datatype = 'array' elif item_datatype == 'map': item_datatype = 'object' elif 'integer_data' in self.keyMap[item_key].keys(): if self.keyMap[item_key]['integer_data']: item_datatype = 'integer' value_datatype += ' of %ss' % item_datatype row.append(value_datatype) # add required column if value['required_field']: row.append('yes') else: row.append('') # add default column if 'default_value' in value.keys(): default_values = True if isinstance(value['default_value'], str): row.append('"%s"' % value['default_value']) elif isinstance(value['default_value'], bool): row.append(str(value['default_value']).lower()) else: row.append(str(value['default_value'])) else: row.append('') # define recursive example constructor def determine_example(k, v): example_value = '' if 'example_values' in v.keys(): for i in v['example_values']: if example_value: example_value += ', ' if isinstance(i, str): example_value += '"%s"' % i else: example_value += value elif 'declared_value' in v.keys(): if isinstance(v['declared_value'], str): example_value = '"%s"' % v['declared_value'] elif isinstance(v['declared_value'], bool): example_value = str(v['declared_value']).lower() else: example_value = v['declared_value'] else: if v['value_datatype'] == 'map': example_value = '{...}' elif v['value_datatype'] == 'list': example_value = '[...]' elif v['value_datatype'] == 'null': example_value = 'null' return example_value # add examples column row.append(determine_example(key, value)) # add additional conditions conditions = '' description = '' for k, v in value.items(): extra_integer = False if k == 'integer_data' and syntax == 'javascript': extra_integer = True if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer: add_extra = False if k == 'extra_fields': if v: add_extra = True if k in ('field_description', 'field_title'): field_description = True if k == 'field_description': description = v elif not description: description = v elif k != 'extra_fields' or add_extra: additional_conditions = True if conditions: conditions += '<br>' condition_value = v if isinstance(v, str): condition_value = '"%s"' % v elif isinstance(v, bool): condition_value = str(v).lower() conditions += '%s: %s' % (k, condition_value) row.append(conditions) row.append(description) # add row to rows rows.append(row) # add rows for top field top_dict = self.keyMap['.'] if top_dict['extra_fields']: rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', '']) if 'max_bytes' in top_dict.keys(): rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', '']) # eliminate unused columns if not field_description: headers.pop() if not additional_conditions: headers.pop() if not default_values: headers.pop(3) for row in rows: if not field_description: row.pop() if not additional_conditions: row.pop() if not default_values: row.pop(3) # construct table html table_html = _tabulate(rows, headers, tablefmt='html') # add links to urls in text # markdown_url = re.compile('\[(.*?)\]\((.*)\)') table_html = _add_links(table_html) return table_html
['def', 'tabulate', '(', 'self', ',', 'format', '=', "'html'", ',', 'syntax', '=', "''", ')', ':', 'from', 'tabulate', 'import', 'tabulate', 'as', '_tabulate', '# define headers', 'headers', '=', '[', "'Field'", ',', "'Datatype'", ',', "'Required'", ',', "'Default'", ',', "'Examples'", ',', "'Conditionals'", ',', "'Description'", ']', 'rows', '=', '[', ']', 'default_values', '=', 'False', 'additional_conditions', '=', 'False', 'field_description', '=', 'False', '# construct rows', 'for', 'key', ',', 'value', 'in', 'self', '.', 'keyMap', '.', 'items', '(', ')', ':', 'key_segments', '=', '_segment_path', '(', 'key', ')', 'if', 'key_segments', ':', 'row', '=', '[', ']', '# add field column', 'field_name', '=', "''", 'if', 'len', '(', 'key_segments', ')', '>', '1', ':', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'key_segments', ')', ')', ':', 'field_name', '+=', "'&nbsp;&nbsp;&nbsp;&nbsp;'", 'if', 'key_segments', '[', '-', '1', ']', '==', "'0'", ':', 'field_name', '+=', "'<i>item</i>'", 'else', ':', 'field_name', '+=', 'key_segments', '[', '-', '1', ']', 'row', '.', 'append', '(', 'field_name', ')', '# add datatype column', 'value_datatype', '=', 'value', '[', "'value_datatype'", ']', 'if', "'integer_data'", 'in', 'value', '.', 'keys', '(', ')', ':', 'if', 'value', '[', "'integer_data'", ']', 'and', 'syntax', '!=', "'javascript'", ':', 'value_datatype', '=', "'integer'", 'elif', 'value', '[', "'value_datatype'", ']', '==', "'map'", ':', 'if', 'syntax', '==', "'javascript'", ':', 'value_datatype', '=', "'object'", 'elif', 'value', '[', "'value_datatype'", ']', '==', "'list'", ':', 'if', 'syntax', '==', "'javascript'", ':', 'value_datatype', '=', "'array'", '# retrieve datatype of item in list', 'item_key', '=', 'key', '+', "'[0]'", 'item_datatype', '=', 'self', '.', 'keyMap', '[', 'item_key', ']', '[', "'value_datatype'", ']', 'if', 'syntax', '==', "'javascript'", ':', 'if', 'item_datatype', '==', "'list'", ':', 'item_datatype', '=', "'array'", 'elif', 'item_datatype', '==', "'map'", ':', 'item_datatype', '=', "'object'", 'elif', "'integer_data'", 'in', 'self', '.', 'keyMap', '[', 'item_key', ']', '.', 'keys', '(', ')', ':', 'if', 'self', '.', 'keyMap', '[', 'item_key', ']', '[', "'integer_data'", ']', ':', 'item_datatype', '=', "'integer'", 'value_datatype', '+=', "' of %ss'", '%', 'item_datatype', 'row', '.', 'append', '(', 'value_datatype', ')', '# add required column', 'if', 'value', '[', "'required_field'", ']', ':', 'row', '.', 'append', '(', "'yes'", ')', 'else', ':', 'row', '.', 'append', '(', "''", ')', '# add default column', 'if', "'default_value'", 'in', 'value', '.', 'keys', '(', ')', ':', 'default_values', '=', 'True', 'if', 'isinstance', '(', 'value', '[', "'default_value'", ']', ',', 'str', ')', ':', 'row', '.', 'append', '(', '\'"%s"\'', '%', 'value', '[', "'default_value'", ']', ')', 'elif', 'isinstance', '(', 'value', '[', "'default_value'", ']', ',', 'bool', ')', ':', 'row', '.', 'append', '(', 'str', '(', 'value', '[', "'default_value'", ']', ')', '.', 'lower', '(', ')', ')', 'else', ':', 'row', '.', 'append', '(', 'str', '(', 'value', '[', "'default_value'", ']', ')', ')', 'else', ':', 'row', '.', 'append', '(', "''", ')', '# define recursive example constructor', 'def', 'determine_example', '(', 'k', ',', 'v', ')', ':', 'example_value', '=', "''", 'if', "'example_values'", 'in', 'v', '.', 'keys', '(', ')', ':', 'for', 'i', 'in', 'v', '[', "'example_values'", ']', ':', 'if', 'example_value', ':', 'example_value', '+=', "', '", 'if', 'isinstance', '(', 'i', ',', 'str', ')', ':', 'example_value', '+=', '\'"%s"\'', '%', 'i', 'else', ':', 'example_value', '+=', 'value', 'elif', "'declared_value'", 'in', 'v', '.', 'keys', '(', ')', ':', 'if', 'isinstance', '(', 'v', '[', "'declared_value'", ']', ',', 'str', ')', ':', 'example_value', '=', '\'"%s"\'', '%', 'v', '[', "'declared_value'", ']', 'elif', 'isinstance', '(', 'v', '[', "'declared_value'", ']', ',', 'bool', ')', ':', 'example_value', '=', 'str', '(', 'v', '[', "'declared_value'", ']', ')', '.', 'lower', '(', ')', 'else', ':', 'example_value', '=', 'v', '[', "'declared_value'", ']', 'else', ':', 'if', 'v', '[', "'value_datatype'", ']', '==', "'map'", ':', 'example_value', '=', "'{...}'", 'elif', 'v', '[', "'value_datatype'", ']', '==', "'list'", ':', 'example_value', '=', "'[...]'", 'elif', 'v', '[', "'value_datatype'", ']', '==', "'null'", ':', 'example_value', '=', "'null'", 'return', 'example_value', '# add examples column', 'row', '.', 'append', '(', 'determine_example', '(', 'key', ',', 'value', ')', ')', '# add additional conditions', 'conditions', '=', "''", 'description', '=', "''", 'for', 'k', ',', 'v', 'in', 'value', '.', 'items', '(', ')', ':', 'extra_integer', '=', 'False', 'if', 'k', '==', "'integer_data'", 'and', 'syntax', '==', "'javascript'", ':', 'extra_integer', '=', 'True', 'if', 'k', 'not', 'in', '(', "'example_values'", ',', "'value_datatype'", ',', "'required_field'", ',', "'declared_value'", ',', "'default_value'", ',', "'field_position'", ',', "'field_metadata'", ')', 'or', 'extra_integer', ':', 'add_extra', '=', 'False', 'if', 'k', '==', "'extra_fields'", ':', 'if', 'v', ':', 'add_extra', '=', 'True', 'if', 'k', 'in', '(', "'field_description'", ',', "'field_title'", ')', ':', 'field_description', '=', 'True', 'if', 'k', '==', "'field_description'", ':', 'description', '=', 'v', 'elif', 'not', 'description', ':', 'description', '=', 'v', 'elif', 'k', '!=', "'extra_fields'", 'or', 'add_extra', ':', 'additional_conditions', '=', 'True', 'if', 'conditions', ':', 'conditions', '+=', "'<br>'", 'condition_value', '=', 'v', 'if', 'isinstance', '(', 'v', ',', 'str', ')', ':', 'condition_value', '=', '\'"%s"\'', '%', 'v', 'elif', 'isinstance', '(', 'v', ',', 'bool', ')', ':', 'condition_value', '=', 'str', '(', 'v', ')', '.', 'lower', '(', ')', 'conditions', '+=', "'%s: %s'", '%', '(', 'k', ',', 'condition_value', ')', 'row', '.', 'append', '(', 'conditions', ')', 'row', '.', 'append', '(', 'description', ')', '# add row to rows', 'rows', '.', 'append', '(', 'row', ')', '# add rows for top field', 'top_dict', '=', 'self', '.', 'keyMap', '[', "'.'", ']', 'if', 'top_dict', '[', "'extra_fields'", ']', ':', 'rows', '.', 'append', '(', '[', "'<i>**extra fields allowed</i>'", ',', "''", ',', "''", ',', "''", ',', "''", ',', "''", ',', "''", ']', ')', 'if', "'max_bytes'", 'in', 'top_dict', '.', 'keys', '(', ')', ':', 'rows', '.', 'append', '(', '[', "'<i>**max bytes: %s</i>'", '%', 'top_dict', '[', "'max_bytes'", ']', ',', "''", ',', "''", ',', "''", ',', "''", ',', "''", ',', "''", ']', ')', '# eliminate unused columns', 'if', 'not', 'field_description', ':', 'headers', '.', 'pop', '(', ')', 'if', 'not', 'additional_conditions', ':', 'headers', '.', 'pop', '(', ')', 'if', 'not', 'default_values', ':', 'headers', '.', 'pop', '(', '3', ')', 'for', 'row', 'in', 'rows', ':', 'if', 'not', 'field_description', ':', 'row', '.', 'pop', '(', ')', 'if', 'not', 'additional_conditions', ':', 'row', '.', 'pop', '(', ')', 'if', 'not', 'default_values', ':', 'row', '.', 'pop', '(', '3', ')', '# construct table html', 'table_html', '=', '_tabulate', '(', 'rows', ',', 'headers', ',', 'tablefmt', '=', "'html'", ')', '# add links to urls in text', "# markdown_url = re.compile('\\[(.*?)\\]\\((.*)\\)')", 'table_html', '=', '_add_links', '(', 'table_html', ')', 'return', 'table_html']
a function to create a table from the class model keyMap :param format: string with format for table output :param syntax: [optional] string with linguistic syntax :return: string with table
['a', 'function', 'to', 'create', 'a', 'table', 'from', 'the', 'class', 'model', 'keyMap']
train
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/_extensions.py#L34-L209
5,310
david-caro/python-autosemver
autosemver/packaging.py
get_releasenotes
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''): """ Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved """ releasenotes = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES') if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file): with open(releasenotes_file) as releasenotes_fd: releasenotes = releasenotes_fd.read() else: releasenotes = api.get_releasenotes( repo_path=project_dir, bugtracker_url=bugtracker_url, ) return releasenotes
python
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''): """ Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved """ releasenotes = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES') if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file): with open(releasenotes_file) as releasenotes_fd: releasenotes = releasenotes_fd.read() else: releasenotes = api.get_releasenotes( repo_path=project_dir, bugtracker_url=bugtracker_url, ) return releasenotes
['def', 'get_releasenotes', '(', 'project_dir', '=', 'os', '.', 'curdir', ',', 'bugtracker_url', '=', "''", ')', ':', 'releasenotes', '=', "''", 'pkg_info_file', '=', 'os', '.', 'path', '.', 'join', '(', 'project_dir', ',', "'PKG-INFO'", ')', 'releasenotes_file', '=', 'os', '.', 'path', '.', 'join', '(', 'project_dir', ',', "'RELEASE_NOTES'", ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'pkg_info_file', ')', 'and', 'os', '.', 'path', '.', 'exists', '(', 'releasenotes_file', ')', ':', 'with', 'open', '(', 'releasenotes_file', ')', 'as', 'releasenotes_fd', ':', 'releasenotes', '=', 'releasenotes_fd', '.', 'read', '(', ')', 'else', ':', 'releasenotes', '=', 'api', '.', 'get_releasenotes', '(', 'repo_path', '=', 'project_dir', ',', 'bugtracker_url', '=', 'bugtracker_url', ',', ')', 'return', 'releasenotes']
Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved
['Retrieves', 'the', 'release', 'notes', 'from', 'the', 'RELEASE_NOTES', 'file', '(', 'if', 'in', 'a', 'package', ')', 'or', 'generates', 'it', 'from', 'the', 'git', 'history', '.']
train
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/packaging.py#L149-L177
5,311
tortoise/tortoise-orm
tortoise/queryset.py
QuerySet.count
def count(self) -> "CountQuery": """ Return count of objects in queryset instead of objects. """ return CountQuery( db=self._db, model=self.model, q_objects=self._q_objects, annotations=self._annotations, custom_filters=self._custom_filters, )
python
def count(self) -> "CountQuery": """ Return count of objects in queryset instead of objects. """ return CountQuery( db=self._db, model=self.model, q_objects=self._q_objects, annotations=self._annotations, custom_filters=self._custom_filters, )
['def', 'count', '(', 'self', ')', '->', '"CountQuery"', ':', 'return', 'CountQuery', '(', 'db', '=', 'self', '.', '_db', ',', 'model', '=', 'self', '.', 'model', ',', 'q_objects', '=', 'self', '.', '_q_objects', ',', 'annotations', '=', 'self', '.', '_annotations', ',', 'custom_filters', '=', 'self', '.', '_custom_filters', ',', ')']
Return count of objects in queryset instead of objects.
['Return', 'count', 'of', 'objects', 'in', 'queryset', 'instead', 'of', 'objects', '.']
train
https://github.com/tortoise/tortoise-orm/blob/7d16457731905e19d4d06ccd5b4ea16d4a9447b2/tortoise/queryset.py#L332-L342
5,312
spotify/luigi
luigi/contrib/hdfs/target.py
HdfsTarget.move
def move(self, path, raise_if_exists=False): """ Alias for ``rename()`` """ self.rename(path, raise_if_exists=raise_if_exists)
python
def move(self, path, raise_if_exists=False): """ Alias for ``rename()`` """ self.rename(path, raise_if_exists=raise_if_exists)
['def', 'move', '(', 'self', ',', 'path', ',', 'raise_if_exists', '=', 'False', ')', ':', 'self', '.', 'rename', '(', 'path', ',', 'raise_if_exists', '=', 'raise_if_exists', ')']
Alias for ``rename()``
['Alias', 'for', 'rename', '()']
train
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/target.py#L134-L138
5,313
expfactory/expfactory
expfactory/validator/experiments.py
ExperimentValidator._validate_config
def _validate_config(self, folder, validate_folder=True): ''' validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :name: if provided, the folder name to check against exp_id ''' config = "%s/config.json" % folder name = os.path.basename(folder) if not os.path.exists(config): return notvalid("%s: config.json not found." %(folder)) # Load the config try: config = read_json(config) except: return notvalid("%s: cannot load json, invalid." %(name)) # Config.json should be single dict if isinstance(config, list): return notvalid("%s: config.json is a list, not valid." %(name)) # Check over required fields fields = self.get_validation_fields() for field,value,ftype in fields: bot.verbose('field: %s, required: %s' %(field,value)) # Field must be in the keys if required if field not in config.keys(): if value == 1: return notvalid("%s: config.json is missing required field %s" %(name,field)) # Field is present, check type else: if not isinstance(config[field], ftype): return notvalid("%s: invalid type, must be %s." %(name,str(ftype))) # Expid gets special treatment if field == "exp_id" and validate_folder is True: if config[field] != name: return notvalid("%s: exp_id parameter %s does not match folder name." %(name,config[field])) # name cannot have special characters, only _ and letters/numbers if not re.match("^[a-z0-9_-]*$", config[field]): message = "%s: exp_id parameter %s has invalid characters" message += "only lowercase [a-z],[0-9], -, and _ allowed." return notvalid(message %(name,config[field])) return True
python
def _validate_config(self, folder, validate_folder=True): ''' validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :name: if provided, the folder name to check against exp_id ''' config = "%s/config.json" % folder name = os.path.basename(folder) if not os.path.exists(config): return notvalid("%s: config.json not found." %(folder)) # Load the config try: config = read_json(config) except: return notvalid("%s: cannot load json, invalid." %(name)) # Config.json should be single dict if isinstance(config, list): return notvalid("%s: config.json is a list, not valid." %(name)) # Check over required fields fields = self.get_validation_fields() for field,value,ftype in fields: bot.verbose('field: %s, required: %s' %(field,value)) # Field must be in the keys if required if field not in config.keys(): if value == 1: return notvalid("%s: config.json is missing required field %s" %(name,field)) # Field is present, check type else: if not isinstance(config[field], ftype): return notvalid("%s: invalid type, must be %s." %(name,str(ftype))) # Expid gets special treatment if field == "exp_id" and validate_folder is True: if config[field] != name: return notvalid("%s: exp_id parameter %s does not match folder name." %(name,config[field])) # name cannot have special characters, only _ and letters/numbers if not re.match("^[a-z0-9_-]*$", config[field]): message = "%s: exp_id parameter %s has invalid characters" message += "only lowercase [a-z],[0-9], -, and _ allowed." return notvalid(message %(name,config[field])) return True
['def', '_validate_config', '(', 'self', ',', 'folder', ',', 'validate_folder', '=', 'True', ')', ':', 'config', '=', '"%s/config.json"', '%', 'folder', 'name', '=', 'os', '.', 'path', '.', 'basename', '(', 'folder', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'config', ')', ':', 'return', 'notvalid', '(', '"%s: config.json not found."', '%', '(', 'folder', ')', ')', '# Load the config', 'try', ':', 'config', '=', 'read_json', '(', 'config', ')', 'except', ':', 'return', 'notvalid', '(', '"%s: cannot load json, invalid."', '%', '(', 'name', ')', ')', '# Config.json should be single dict', 'if', 'isinstance', '(', 'config', ',', 'list', ')', ':', 'return', 'notvalid', '(', '"%s: config.json is a list, not valid."', '%', '(', 'name', ')', ')', '# Check over required fields', 'fields', '=', 'self', '.', 'get_validation_fields', '(', ')', 'for', 'field', ',', 'value', ',', 'ftype', 'in', 'fields', ':', 'bot', '.', 'verbose', '(', "'field: %s, required: %s'", '%', '(', 'field', ',', 'value', ')', ')', '# Field must be in the keys if required', 'if', 'field', 'not', 'in', 'config', '.', 'keys', '(', ')', ':', 'if', 'value', '==', '1', ':', 'return', 'notvalid', '(', '"%s: config.json is missing required field %s"', '%', '(', 'name', ',', 'field', ')', ')', '# Field is present, check type', 'else', ':', 'if', 'not', 'isinstance', '(', 'config', '[', 'field', ']', ',', 'ftype', ')', ':', 'return', 'notvalid', '(', '"%s: invalid type, must be %s."', '%', '(', 'name', ',', 'str', '(', 'ftype', ')', ')', ')', '# Expid gets special treatment', 'if', 'field', '==', '"exp_id"', 'and', 'validate_folder', 'is', 'True', ':', 'if', 'config', '[', 'field', ']', '!=', 'name', ':', 'return', 'notvalid', '(', '"%s: exp_id parameter %s does not match folder name."', '%', '(', 'name', ',', 'config', '[', 'field', ']', ')', ')', '# name cannot have special characters, only _ and letters/numbers', 'if', 'not', 're', '.', 'match', '(', '"^[a-z0-9_-]*$"', ',', 'config', '[', 'field', ']', ')', ':', 'message', '=', '"%s: exp_id parameter %s has invalid characters"', 'message', '+=', '"only lowercase [a-z],[0-9], -, and _ allowed."', 'return', 'notvalid', '(', 'message', '%', '(', 'name', ',', 'config', '[', 'field', ']', ')', ')', 'return', 'True']
validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :name: if provided, the folder name to check against exp_id
['validate', 'config', 'is', 'the', 'primary', 'validation', 'function', 'that', 'checks', 'for', 'presence', 'and', 'format', 'of', 'required', 'fields', '.']
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/experiments.py#L93-L146
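The _validate_config record above reduces to a reusable pattern: walk a list of (field, required, type) tuples and reject the config on the first violation. The sketch below is not part of expfactory; the field list is hypothetical and only mirrors the (field, required, type) shape that get_validation_fields() appears to return.

import re

def check_config(config, fields):
    """Validate a config dict against (field, required, type) rules."""
    for field, required, ftype in fields:
        if field not in config:
            if required == 1:
                return "missing required field %s" % field
            continue
        if not isinstance(config[field], ftype):
            return "field %s must be %s" % (field, ftype)
        if field == "exp_id" and not re.match("^[a-z0-9_-]*$", config[field]):
            return "exp_id %s has invalid characters" % config[field]
    return True

# Hypothetical field spec in the same (name, required flag, type) shape:
fields = [("exp_id", 1, str), ("name", 1, str), ("time", 0, int)]
print(check_config({"exp_id": "test_task", "name": "Test"}, fields))  # True
print(check_config({"name": "Test"}, fields))   # missing required field exp_id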
5,314
ejeschke/ginga
ginga/BaseImage.py
BaseImage.get_shape_mask
def get_shape_mask(self, shape_obj): """ Return full mask where True marks pixels within the given shape. """ wd, ht = self.get_size() yi = np.mgrid[:ht].reshape(-1, 1) xi = np.mgrid[:wd].reshape(1, -1) pts = np.asarray((xi, yi)).T contains = shape_obj.contains_pts(pts) return contains
python
def get_shape_mask(self, shape_obj): """ Return full mask where True marks pixels within the given shape. """ wd, ht = self.get_size() yi = np.mgrid[:ht].reshape(-1, 1) xi = np.mgrid[:wd].reshape(1, -1) pts = np.asarray((xi, yi)).T contains = shape_obj.contains_pts(pts) return contains
['def', 'get_shape_mask', '(', 'self', ',', 'shape_obj', ')', ':', 'wd', ',', 'ht', '=', 'self', '.', 'get_size', '(', ')', 'yi', '=', 'np', '.', 'mgrid', '[', ':', 'ht', ']', '.', 'reshape', '(', '-', '1', ',', '1', ')', 'xi', '=', 'np', '.', 'mgrid', '[', ':', 'wd', ']', '.', 'reshape', '(', '1', ',', '-', '1', ')', 'pts', '=', 'np', '.', 'asarray', '(', '(', 'xi', ',', 'yi', ')', ')', '.', 'T', 'contains', '=', 'shape_obj', '.', 'contains_pts', '(', 'pts', ')', 'return', 'contains']
Return full mask where True marks pixels within the given shape.
['Return', 'full', 'mask', 'where', 'True', 'marks', 'pixels', 'within', 'the', 'given', 'shape', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L348-L357
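get_shape_mask builds a full per-pixel coordinate grid and lets the shape object decide containment in one vectorized call. Below is a minimal stand-alone illustration of the same grid-and-test idea using NumPy only; the hard-coded circle test is an assumption for demonstration, not ginga's shape API.

import numpy as np

def circle_mask(wd, ht, cx, cy, r):
    """Boolean (ht, wd) mask that is True inside a circle of radius r."""
    yi, xi = np.mgrid[0:ht, 0:wd]              # per-pixel row/column indices
    return (xi - cx) ** 2 + (yi - cy) ** 2 <= r ** 2

mask = circle_mask(wd=8, ht=6, cx=3, cy=2, r=2)
print(mask.astype(int))                        # 6x8 grid of 0/1 values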
5,315
pyviz/geoviews
geoviews/util.py
geom_length
def geom_length(geom): """ Calculates the length of coordinates in a shapely geometry. """ if geom.geom_type == 'Point': return 1 if hasattr(geom, 'exterior'): geom = geom.exterior if not geom.geom_type.startswith('Multi') and hasattr(geom, 'array_interface_base'): return len(geom.array_interface_base['data'])//2 else: length = 0 for g in geom: length += geom_length(g) return length
python
def geom_length(geom): """ Calculates the length of coordinates in a shapely geometry. """ if geom.geom_type == 'Point': return 1 if hasattr(geom, 'exterior'): geom = geom.exterior if not geom.geom_type.startswith('Multi') and hasattr(geom, 'array_interface_base'): return len(geom.array_interface_base['data'])//2 else: length = 0 for g in geom: length += geom_length(g) return length
['def', 'geom_length', '(', 'geom', ')', ':', 'if', 'geom', '.', 'geom_type', '==', "'Point'", ':', 'return', '1', 'if', 'hasattr', '(', 'geom', ',', "'exterior'", ')', ':', 'geom', '=', 'geom', '.', 'exterior', 'if', 'not', 'geom', '.', 'geom_type', '.', 'startswith', '(', "'Multi'", ')', 'and', 'hasattr', '(', 'geom', ',', "'array_interface_base'", ')', ':', 'return', 'len', '(', 'geom', '.', 'array_interface_base', '[', "'data'", ']', ')', '//', '2', 'else', ':', 'length', '=', '0', 'for', 'g', 'in', 'geom', ':', 'length', '+=', 'geom_length', '(', 'g', ')', 'return', 'length']
Calculates the length of coordinates in a shapely geometry.
['Calculates', 'the', 'length', 'of', 'coordinates', 'in', 'a', 'shapely', 'geometry', '.']
train
https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/util.py#L327-L341
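geom_length recursively counts coordinate pairs, but it reads them through array_interface_base, which looks like a Shapely 1.x internal. The sketch below is a rough equivalent using only public Shapely attributes (coords, exterior, geoms); it is an approximation of the idea, not GeoViews' implementation.

from shapely.geometry import LineString, MultiPolygon, Point, Polygon

def coord_count(geom):
    """Count coordinate pairs in a shapely geometry, recursing like geom_length."""
    if geom.geom_type == 'Point':
        return 1
    if hasattr(geom, 'exterior'):              # Polygon: count its outer ring
        geom = geom.exterior
    if geom.geom_type.startswith('Multi') or geom.geom_type == 'GeometryCollection':
        return sum(coord_count(g) for g in geom.geoms)
    return len(geom.coords)

print(coord_count(Point(0, 0)))                                        # 1
print(coord_count(LineString([(0, 0), (1, 1), (2, 0)])))               # 3
print(coord_count(MultiPolygon([Polygon([(0, 0), (1, 0), (1, 1)])])))  # 4 (ring closes itself)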
5,316
mitsei/dlkit
dlkit/json_/relationship/sessions.py
FamilyHierarchyDesignSession.remove_root_family
def remove_root_family(self, family_id): """Removes a root family. arg: family_id (osid.id.Id): the ``Id`` of a family raise: NotFound - ``family_id`` not a root raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.remove_root_bin_template if self._catalog_session is not None: return self._catalog_session.remove_root_catalog(catalog_id=family_id) return self._hierarchy_session.remove_root(id_=family_id)
python
def remove_root_family(self, family_id): """Removes a root family. arg: family_id (osid.id.Id): the ``Id`` of a family raise: NotFound - ``family_id`` not a root raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.remove_root_bin_template if self._catalog_session is not None: return self._catalog_session.remove_root_catalog(catalog_id=family_id) return self._hierarchy_session.remove_root(id_=family_id)
['def', 'remove_root_family', '(', 'self', ',', 'family_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchyDesignSession.remove_root_bin_template', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'remove_root_catalog', '(', 'catalog_id', '=', 'family_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'remove_root', '(', 'id_', '=', 'family_id', ')']
Removes a root family. arg: family_id (osid.id.Id): the ``Id`` of a family raise: NotFound - ``family_id`` not a root raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
['Removes', 'a', 'root', 'family', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/sessions.py#L2475-L2490
5,317
broadinstitute/fiss
firecloud/workspace.py
Workspace.submissions
def submissions(self): """List job submissions in workspace.""" r = fapi.get_submissions(self.namespace, self.name, self.api_url) fapi._check_response_code(r, 200) return r.json()
python
def submissions(self): """List job submissions in workspace.""" r = fapi.get_submissions(self.namespace, self.name, self.api_url) fapi._check_response_code(r, 200) return r.json()
['def', 'submissions', '(', 'self', ')', ':', 'r', '=', 'fapi', '.', 'get_submissions', '(', 'self', '.', 'namespace', ',', 'self', '.', 'name', ',', 'self', '.', 'api_url', ')', 'fapi', '.', '_check_response_code', '(', 'r', ',', '200', ')', 'return', 'r', '.', 'json', '(', ')']
List job submissions in workspace.
['List', 'job', 'submissions', 'in', 'workspace', '.']
train
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/workspace.py#L214-L218
5,318
JNRowe/upoints
upoints/point.py
_dms_formatter
def _dms_formatter(latitude, longitude, mode, unistr=False): """Generate a human readable DM/DMS location string. Args: latitude (float): Location's latitude longitude (float): Location's longitude mode (str): Coordinate formatting system to use unistr (bool): Whether to use extended character set """ if unistr: chars = ('°', '′', '″') else: chars = ('°', "'", '"') latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode))) longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode))) text = [] if mode == 'dms': text.append('%%02i%s%%02i%s%%02i%s' % chars % latitude_dms) else: text.append('%%02i%s%%05.2f%s' % chars[:2] % latitude_dms) text.append('S' if latitude < 0 else 'N') if mode == 'dms': text.append(', %%03i%s%%02i%s%%02i%s' % chars % longitude_dms) else: text.append(', %%03i%s%%05.2f%s' % chars[:2] % longitude_dms) text.append('W' if longitude < 0 else 'E') return text
python
def _dms_formatter(latitude, longitude, mode, unistr=False): """Generate a human readable DM/DMS location string. Args: latitude (float): Location's latitude longitude (float): Location's longitude mode (str): Coordinate formatting system to use unistr (bool): Whether to use extended character set """ if unistr: chars = ('°', '′', '″') else: chars = ('°', "'", '"') latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode))) longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode))) text = [] if mode == 'dms': text.append('%%02i%s%%02i%s%%02i%s' % chars % latitude_dms) else: text.append('%%02i%s%%05.2f%s' % chars[:2] % latitude_dms) text.append('S' if latitude < 0 else 'N') if mode == 'dms': text.append(', %%03i%s%%02i%s%%02i%s' % chars % longitude_dms) else: text.append(', %%03i%s%%05.2f%s' % chars[:2] % longitude_dms) text.append('W' if longitude < 0 else 'E') return text
['def', '_dms_formatter', '(', 'latitude', ',', 'longitude', ',', 'mode', ',', 'unistr', '=', 'False', ')', ':', 'if', 'unistr', ':', 'chars', '=', '(', "'°',", ' ', "′', '", '″', ')', '', 'else', ':', 'chars', '=', '(', "'°',", ' ', '\'",', ' ', '"\')', '', 'latitude_dms', '=', 'tuple', '(', 'map', '(', 'abs', ',', 'utils', '.', 'to_dms', '(', 'latitude', ',', 'mode', ')', ')', ')', 'longitude_dms', '=', 'tuple', '(', 'map', '(', 'abs', ',', 'utils', '.', 'to_dms', '(', 'longitude', ',', 'mode', ')', ')', ')', 'text', '=', '[', ']', 'if', 'mode', '==', "'dms'", ':', 'text', '.', 'append', '(', "'%%02i%s%%02i%s%%02i%s'", '%', 'chars', '%', 'latitude_dms', ')', 'else', ':', 'text', '.', 'append', '(', "'%%02i%s%%05.2f%s'", '%', 'chars', '[', ':', '2', ']', '%', 'latitude_dms', ')', 'text', '.', 'append', '(', "'S'", 'if', 'latitude', '<', '0', 'else', "'N'", ')', 'if', 'mode', '==', "'dms'", ':', 'text', '.', 'append', '(', "', %%03i%s%%02i%s%%02i%s'", '%', 'chars', '%', 'longitude_dms', ')', 'else', ':', 'text', '.', 'append', '(', "', %%03i%s%%05.2f%s'", '%', 'chars', '[', ':', '2', ']', '%', 'longitude_dms', ')', 'text', '.', 'append', '(', "'W'", 'if', 'longitude', '<', '0', 'else', "'E'", ')', 'return', 'text']
Generate a human readable DM/DMS location string. Args: latitude (float): Location's latitude longitude (float): Location's longitude mode (str): Coordinate formatting system to use unistr (bool): Whether to use extended character set
['Generate', 'a', 'human', 'readable', 'DM', '/', 'DMS', 'location', 'string', '.']
train
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/point.py#L41-L68
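_dms_formatter leans on utils.to_dms for the degree/minute/second split. The sketch below folds a simplified version of that split into a stand-alone formatter; to_dms here is a stand-in, not the upoints implementation, and seconds are rounded to whole numbers for brevity.

def to_dms(value):
    """Split decimal degrees into (degrees, minutes, seconds)."""
    degrees = int(abs(value))
    rem = (abs(value) - degrees) * 60
    minutes = int(rem)
    seconds = (rem - minutes) * 60
    return degrees, minutes, seconds

def format_dms(latitude, longitude):
    """Human readable DMS string with the same N/S/E/W convention as _dms_formatter."""
    lat_d, lat_m, lat_s = to_dms(latitude)
    lon_d, lon_m, lon_s = to_dms(longitude)
    return "%02i°%02i'%02i\"%s, %03i°%02i'%02i\"%s" % (
        lat_d, lat_m, round(lat_s), 'S' if latitude < 0 else 'N',
        lon_d, lon_m, round(lon_s), 'W' if longitude < 0 else 'E')

print(format_dms(52.015, -0.221))   # 52°00'54"N, 000°13'16"W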
5,319
ray-project/ray
python/ray/tune/ray_trial_executor.py
RayTrialExecutor._train
def _train(self, trial): """Start one iteration of training and save remote id.""" assert trial.status == Trial.RUNNING, trial.status remote = trial.runner.train.remote() # Local Mode if isinstance(remote, dict): remote = _LocalWrapper(remote) self._running[remote] = trial
python
def _train(self, trial): """Start one iteration of training and save remote id.""" assert trial.status == Trial.RUNNING, trial.status remote = trial.runner.train.remote() # Local Mode if isinstance(remote, dict): remote = _LocalWrapper(remote) self._running[remote] = trial
['def', '_train', '(', 'self', ',', 'trial', ')', ':', 'assert', 'trial', '.', 'status', '==', 'Trial', '.', 'RUNNING', ',', 'trial', '.', 'status', 'remote', '=', 'trial', '.', 'runner', '.', 'train', '.', 'remote', '(', ')', '# Local Mode', 'if', 'isinstance', '(', 'remote', ',', 'dict', ')', ':', 'remote', '=', '_LocalWrapper', '(', 'remote', ')', 'self', '.', '_running', '[', 'remote', ']', '=', 'trial']
Start one iteration of training and save remote id.
['Start', 'one', 'iteration', 'of', 'training', 'and', 'save', 'remote', 'id', '.']
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/ray_trial_executor.py#L107-L117
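_train submits one remote training step and files the resulting future under the trial that owns it (the _running dict). The same future-to-owner bookkeeping can be shown without Ray using concurrent.futures; this is only an analogy to the pattern, not Ray's API, and the train_step function is made up.

import time
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait

def train_step(trial_name):
    time.sleep(0.1)                         # pretend to run one training iteration
    return {'trial': trial_name, 'loss': 0.5}

running = {}                                # future -> owning trial, like self._running
with ThreadPoolExecutor() as pool:
    for trial in ('trial_a', 'trial_b'):
        running[pool.submit(train_step, trial)] = trial
    done, _ = wait(running, return_when=FIRST_COMPLETED)
    for fut in done:
        print(running.pop(fut), fut.result())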
5,320
zhanglab/psamm
psamm/importer.py
main_bigg
def main_bigg(args=None, urlopen=urlopen): """Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from BiGG database') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occuring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') parser.add_argument('id', help='BiGG model to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') # Print list of available models if args.id == 'list': print('Available models:') f = urlopen('http://bigg.ucsd.edu/api/v2/models') doc = json.loads(f.read().decode('utf-8')) results = doc['results'] id_width = min(max(len(result['bigg_id']) for result in results), 16) for result in sorted(results, key=lambda x: x.get('organism')): print('{} {}'.format( result.get('bigg_id').ljust(id_width), result.get('organism'))) return 0 importer_entry = None try: importer_entry = next( pkg_resources.iter_entry_points('psamm.importer', 'JSON')) except StopIteration: logger.error('Failed to locate the COBRA JSON model importer!') sys.exit(-1) importer_class = importer_entry.load() importer = importer_class() try: f = urlopen( 'http://bigg.ucsd.edu/api/v2/models/{}/download'.format( url_quote(args.id))) model = importer.import_model(codecs.getreader('utf-8')(f)) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
python
def main_bigg(args=None, urlopen=urlopen): """Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from BiGG database') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occuring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') parser.add_argument('id', help='BiGG model to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') # Print list of available models if args.id == 'list': print('Available models:') f = urlopen('http://bigg.ucsd.edu/api/v2/models') doc = json.loads(f.read().decode('utf-8')) results = doc['results'] id_width = min(max(len(result['bigg_id']) for result in results), 16) for result in sorted(results, key=lambda x: x.get('organism')): print('{} {}'.format( result.get('bigg_id').ljust(id_width), result.get('organism'))) return 0 importer_entry = None try: importer_entry = next( pkg_resources.iter_entry_points('psamm.importer', 'JSON')) except StopIteration: logger.error('Failed to locate the COBRA JSON model importer!') sys.exit(-1) importer_class = importer_entry.load() importer = importer_class() try: f = urlopen( 'http://bigg.ucsd.edu/api/v2/models/{}/download'.format( url_quote(args.id))) model = importer.import_model(codecs.getreader('utf-8')(f)) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
['def', 'main_bigg', '(', 'args', '=', 'None', ',', 'urlopen', '=', 'urlopen', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Import from BiGG database'", ')', 'parser', '.', 'add_argument', '(', "'--dest'", ',', 'metavar', '=', "'path'", ',', 'default', '=', "'.'", ',', 'help', '=', '\'Destination directory (default is ".")\'', ')', 'parser', '.', 'add_argument', '(', "'--no-exchange'", ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'Disable importing exchange reactions as'", "' exchange compound file.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--split-subsystem'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Enable splitting reaction files by subsystem'", ')', 'parser', '.', 'add_argument', '(', "'--merge-compounds'", ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'Merge identical compounds occuring in various'", "' compartments.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--force'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Enable overwriting model files'", ')', 'parser', '.', 'add_argument', '(', "'id'", ',', 'help', '=', '\'BiGG model to import ("list" to see all)\'', ')', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', ')', '# Set up logging for the command line interface', 'if', "'PSAMM_DEBUG'", 'in', 'os', '.', 'environ', ':', 'level', '=', 'getattr', '(', 'logging', ',', 'os', '.', 'environ', '[', "'PSAMM_DEBUG'", ']', '.', 'upper', '(', ')', ',', 'None', ')', 'if', 'level', 'is', 'not', 'None', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'level', ')', 'else', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ',', 'format', '=', "'%(levelname)s: %(message)s'", ')', '# Print list of available models', 'if', 'args', '.', 'id', '==', "'list'", ':', 'print', '(', "'Available models:'", ')', 'f', '=', 'urlopen', '(', "'http://bigg.ucsd.edu/api/v2/models'", ')', 'doc', '=', 'json', '.', 'loads', '(', 'f', '.', 'read', '(', ')', '.', 'decode', '(', "'utf-8'", ')', ')', 'results', '=', 'doc', '[', "'results'", ']', 'id_width', '=', 'min', '(', 'max', '(', 'len', '(', 'result', '[', "'bigg_id'", ']', ')', 'for', 'result', 'in', 'results', ')', ',', '16', ')', 'for', 'result', 'in', 'sorted', '(', 'results', ',', 'key', '=', 'lambda', 'x', ':', 'x', '.', 'get', '(', "'organism'", ')', ')', ':', 'print', '(', "'{} {}'", '.', 'format', '(', 'result', '.', 'get', '(', "'bigg_id'", ')', '.', 'ljust', '(', 'id_width', ')', ',', 'result', '.', 'get', '(', "'organism'", ')', ')', ')', 'return', '0', 'importer_entry', '=', 'None', 'try', ':', 'importer_entry', '=', 'next', '(', 'pkg_resources', '.', 'iter_entry_points', '(', "'psamm.importer'", ',', "'JSON'", ')', ')', 'except', 'StopIteration', ':', 'logger', '.', 'error', '(', "'Failed to locate the COBRA JSON model importer!'", ')', 'sys', '.', 'exit', '(', '-', '1', ')', 'importer_class', '=', 'importer_entry', '.', 'load', '(', ')', 'importer', '=', 'importer_class', '(', ')', 'try', ':', 'f', '=', 'urlopen', '(', "'http://bigg.ucsd.edu/api/v2/models/{}/download'", '.', 'format', '(', 'url_quote', '(', 'args', '.', 'id', ')', ')', ')', 'model', '=', 'importer', '.', 'import_model', '(', 'codecs', '.', 'getreader', '(', "'utf-8'", ')', '(', 'f', ')', ')', 'except', 'ModelLoadError', 'as', 'e', ':', 'logger', '.', 'error', '(', "'Failed to load model!'", ',', 'exc_info', '=', 'True', ')', 'importer', '.', 'help', '(', ')', 'parser', '.', 'error', '(', 'text_type', '(', 'e', ')', ')', 'except', 'ParseError', 'as', 'e', ':', 'logger', '.', 'error', '(', "'Failed to parse model!'", ',', 'exc_info', '=', 'True', ')', 'logger', '.', 'error', '(', 'text_type', '(', 'e', ')', ')', 'sys', '.', 'exit', '(', '-', '1', ')', 'if', 'args', '.', 'merge_compounds', ':', 'compounds_before', '=', 'len', '(', 'model', '.', 'compounds', ')', 'sbml', '.', 'merge_equivalent_compounds', '(', 'model', ')', 'if', 'len', '(', 'model', '.', 'compounds', ')', '<', 'compounds_before', ':', 'logger', '.', 'info', '(', "'Merged {} compound entries into {} entries by'", "' removing duplicates in various compartments'", '.', 'format', '(', 'compounds_before', ',', 'len', '(', 'model', '.', 'compounds', ')', ')', ')', 'print', '(', "'Model: {}'", '.', 'format', '(', 'model', '.', 'name', ')', ')', 'print', '(', "'- Biomass reaction: {}'", '.', 'format', '(', 'model', '.', 'biomass_reaction', ')', ')', 'print', '(', "'- Compartments: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'compartments', ')', ')', ')', 'print', '(', "'- Compounds: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'compounds', ')', ')', ')', 'print', '(', "'- Reactions: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'reactions', ')', ')', ')', 'print', '(', "'- Genes: {}'", '.', 'format', '(', 'count_genes', '(', 'model', ')', ')', ')', '# Check if dest directory is empty. If we get an error assume that the', '# directory does not exist.', 'dest_is_empty', '=', 'False', 'try', ':', 'dest_is_empty', '=', 'len', '(', 'os', '.', 'listdir', '(', 'args', '.', 'dest', ')', ')', '==', '0', 'except', 'OSError', ':', 'dest_is_empty', '=', 'True', 'if', 'not', 'dest_is_empty', ':', 'if', 'not', 'args', '.', 'force', ':', 'logger', '.', 'error', '(', "'Destination directory is not empty. Use --force'", "' option to proceed anyway, overwriting any existing'", "' files in {}'", '.', 'format', '(', 'args', '.', 'dest', ')', ')', 'return', '1', 'else', ':', 'logger', '.', 'warning', '(', "'Destination directory is not empty, overwriting'", "' existing files in {}'", '.', 'format', '(', 'args', '.', 'dest', ')', ')', '# Create destination directory if not exists', 'dest', '=', 'args', '.', 'dest', 'mkdir_p', '(', 'dest', ')', 'convert_exchange', '=', 'not', 'args', '.', 'no_exchange', 'write_yaml_model', '(', 'model', ',', 'dest', ',', 'convert_exchange', '=', 'convert_exchange', ',', 'split_subsystem', '=', 'args', '.', 'split_subsystem', ')']
Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing.
['Entry', 'point', 'for', 'BiGG', 'import', 'program', '.']
train
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importer.py#L703-L813
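Because main_bigg takes urlopen as a parameter, its 'list' branch can be exercised offline by injecting a fake opener. The payload below is made up (two example model ids), and the snippet assumes psamm is installed so main_bigg can be imported from psamm.importer.

import io
import json
from psamm.importer import main_bigg

def fake_urlopen(url):
    """Offline stand-in for urlopen, returning a minimal BiGG 'models' payload."""
    payload = {'results': [
        {'bigg_id': 'e_coli_core', 'organism': 'Escherichia coli'},
        {'bigg_id': 'iJO1366', 'organism': 'Escherichia coli'},
    ]}
    return io.BytesIO(json.dumps(payload).encode('utf-8'))

# Prints the two fake models and returns 0 without touching the network.
main_bigg(['list'], urlopen=fake_urlopen)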
5,321
madprime/cgivar2gvcf
cgivar2gvcf/__init__.py
convert_to_file
def convert_to_file(cgi_input, output_file, twobit_ref, twobit_name, var_only=False): """Convert a CGI var file and output VCF-formatted data to file""" if isinstance(output_file, str): output_file = auto_zip_open(output_file, 'w') conversion = convert(cgi_input=cgi_input, twobit_ref=twobit_ref, twobit_name=twobit_name, var_only=var_only) for line in conversion: output_file.write(line + "\n") output_file.close()
python
def convert_to_file(cgi_input, output_file, twobit_ref, twobit_name, var_only=False): """Convert a CGI var file and output VCF-formatted data to file""" if isinstance(output_file, str): output_file = auto_zip_open(output_file, 'w') conversion = convert(cgi_input=cgi_input, twobit_ref=twobit_ref, twobit_name=twobit_name, var_only=var_only) for line in conversion: output_file.write(line + "\n") output_file.close()
['def', 'convert_to_file', '(', 'cgi_input', ',', 'output_file', ',', 'twobit_ref', ',', 'twobit_name', ',', 'var_only', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'output_file', ',', 'str', ')', ':', 'output_file', '=', 'auto_zip_open', '(', 'output_file', ',', "'w'", ')', 'conversion', '=', 'convert', '(', 'cgi_input', '=', 'cgi_input', ',', 'twobit_ref', '=', 'twobit_ref', ',', 'twobit_name', '=', 'twobit_name', ',', 'var_only', '=', 'var_only', ')', 'for', 'line', 'in', 'conversion', ':', 'output_file', '.', 'write', '(', 'line', '+', '"\\n"', ')', 'output_file', '.', 'close', '(', ')']
Convert a CGI var file and output VCF-formatted data to file
['Convert', 'a', 'CGI', 'var', 'file', 'and', 'output', 'VCF', '-', 'formatted', 'data', 'to', 'file']
train
https://github.com/madprime/cgivar2gvcf/blob/13b4cd8da08669f7e4b0ceed77a7a17082f91037/cgivar2gvcf/__init__.py#L443-L452
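convert_to_file accepts either an already-open file object or a path, opening the latter through auto_zip_open. Below is a generic version of that path-or-handle pattern; the gzip-by-extension opener is a guessed stand-in for cgivar2gvcf's auto_zip_open, and the sample lines are made up.

import gzip

def open_maybe_gzip(path, mode):
    """Open .gz paths with gzip, everything else as plain text (stand-in)."""
    return gzip.open(path, mode + 't') if path.endswith('.gz') else open(path, mode)

def write_lines(lines, output_file):
    """Accept a path or an open file object, as convert_to_file does."""
    if isinstance(output_file, str):
        output_file = open_maybe_gzip(output_file, 'w')
    for line in lines:
        output_file.write(line + "\n")
    output_file.close()

write_lines(['##fileformat=VCFv4.1', '#CHROM\tPOS\tID\tREF\tALT'], 'out.vcf.gz')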
5,322
s1s1ty/py-jsonq
pyjsonq/matcher.py
Matcher._match
def _match(self, x, op, y): """Compare the given `x` and `y` based on `op` :@param x, y, op :@type x, y: mixed :@type op: string :@return bool :@throws ValueError """ if (op not in self.condition_mapper): raise ValueError('Invalid where condition given') func = getattr(self, self.condition_mapper.get(op)) return func(x, y)
python
def _match(self, x, op, y): """Compare the given `x` and `y` based on `op` :@param x, y, op :@type x, y: mixed :@type op: string :@return bool :@throws ValueError """ if (op not in self.condition_mapper): raise ValueError('Invalid where condition given') func = getattr(self, self.condition_mapper.get(op)) return func(x, y)
['def', '_match', '(', 'self', ',', 'x', ',', 'op', ',', 'y', ')', ':', 'if', '(', 'op', 'not', 'in', 'self', '.', 'condition_mapper', ')', ':', 'raise', 'ValueError', '(', "'Invalid where condition given'", ')', 'func', '=', 'getattr', '(', 'self', ',', 'self', '.', 'condition_mapper', '.', 'get', '(', 'op', ')', ')', 'return', 'func', '(', 'x', ',', 'y', ')']
Compare the given `x` and `y` based on `op` :@param x, y, op :@type x, y: mixed :@type op: string :@return bool :@throws ValueError
['Compare', 'the', 'given', 'x', 'and', 'y', 'based', 'on', 'op']
train
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/matcher.py#L162-L176
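_match is a small dispatch table: operator strings map to method names, which are resolved with getattr and called. A self-contained miniature of that pattern is sketched below; the operator set and method names are illustrative, not pyjsonq's actual condition_mapper.

class Matcher:
    """Minimal operator dispatch in the style of pyjsonq's Matcher._match."""
    condition_mapper = {
        '=': '_is_equal',
        '!=': '_is_not_equal',
        '>': '_is_greater',
        'in': '_is_in',
    }

    def _is_equal(self, x, y): return x == y
    def _is_not_equal(self, x, y): return x != y
    def _is_greater(self, x, y): return x > y
    def _is_in(self, x, y): return x in y

    def match(self, x, op, y):
        if op not in self.condition_mapper:
            raise ValueError('Invalid where condition given')
        return getattr(self, self.condition_mapper[op])(x, y)

m = Matcher()
print(m.match(5, '>', 3))           # True
print(m.match('a', 'in', ['a']))    # True
try:
    m.match(5, '~', 3)
except ValueError as exc:
    print(exc)                      # Invalid where condition given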
5,323
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jvm_platform_analysis.py
JvmPlatformAnalysisMixin._unfiltered_jvm_dependency_map
def _unfiltered_jvm_dependency_map(self, fully_transitive=False): """Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing. Unfiltered because the keys in the resulting map include non-JvmTargets. See the explanation in the jvm_dependency_map() docs for what this method produces. :param fully_transitive: if true, the elements of the map will be the full set of transitive JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the definition of "direct") :return: map of target -> set of JvmTarget "direct" dependencies. """ targets = self.jvm_targets jvm_deps = defaultdict(set) def accumulate_jvm_deps(target): for dep in target.dependencies: if self._is_jvm_target(dep): jvm_deps[target].add(dep) if not fully_transitive: continue # If 'dep' isn't in jvm_deps, that means that it isn't in the `targets` list at all # (since this is a post-order traversal). If it's not in the targets list at all, # that means it cannot have any JvmTargets as transitive dependencies. In which case # we don't care about it, so it's fine that the line below is a no-op. # # Otherwise, we add in any transitive dependencies that were previously collected. jvm_deps[target].update(jvm_deps[dep]) # Vanilla DFS runs in O(|V|+|E|), and the code inside the loop in accumulate_jvm_deps ends up # being run once for each in the graph over the course of the entire search, which means that # the total asymptotic runtime complexity is O(|V|+2|E|), which is still O(|V|+|E|). self.context.build_graph.walk_transitive_dependency_graph( addresses=[t.address for t in targets], work=accumulate_jvm_deps, postorder=True ) return jvm_deps
python
def _unfiltered_jvm_dependency_map(self, fully_transitive=False): """Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing. Unfiltered because the keys in the resulting map include non-JvmTargets. See the explanation in the jvm_dependency_map() docs for what this method produces. :param fully_transitive: if true, the elements of the map will be the full set of transitive JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the definition of "direct") :return: map of target -> set of JvmTarget "direct" dependencies. """ targets = self.jvm_targets jvm_deps = defaultdict(set) def accumulate_jvm_deps(target): for dep in target.dependencies: if self._is_jvm_target(dep): jvm_deps[target].add(dep) if not fully_transitive: continue # If 'dep' isn't in jvm_deps, that means that it isn't in the `targets` list at all # (since this is a post-order traversal). If it's not in the targets list at all, # that means it cannot have any JvmTargets as transitive dependencies. In which case # we don't care about it, so it's fine that the line below is a no-op. # # Otherwise, we add in any transitive dependencies that were previously collected. jvm_deps[target].update(jvm_deps[dep]) # Vanilla DFS runs in O(|V|+|E|), and the code inside the loop in accumulate_jvm_deps ends up # being run once for each in the graph over the course of the entire search, which means that # the total asymptotic runtime complexity is O(|V|+2|E|), which is still O(|V|+|E|). self.context.build_graph.walk_transitive_dependency_graph( addresses=[t.address for t in targets], work=accumulate_jvm_deps, postorder=True ) return jvm_deps
['def', '_unfiltered_jvm_dependency_map', '(', 'self', ',', 'fully_transitive', '=', 'False', ')', ':', 'targets', '=', 'self', '.', 'jvm_targets', 'jvm_deps', '=', 'defaultdict', '(', 'set', ')', 'def', 'accumulate_jvm_deps', '(', 'target', ')', ':', 'for', 'dep', 'in', 'target', '.', 'dependencies', ':', 'if', 'self', '.', '_is_jvm_target', '(', 'dep', ')', ':', 'jvm_deps', '[', 'target', ']', '.', 'add', '(', 'dep', ')', 'if', 'not', 'fully_transitive', ':', 'continue', "# If 'dep' isn't in jvm_deps, that means that it isn't in the `targets` list at all", "# (since this is a post-order traversal). If it's not in the targets list at all,", '# that means it cannot have any JvmTargets as transitive dependencies. In which case', "# we don't care about it, so it's fine that the line below is a no-op.", '#', '# Otherwise, we add in any transitive dependencies that were previously collected.', 'jvm_deps', '[', 'target', ']', '.', 'update', '(', 'jvm_deps', '[', 'dep', ']', ')', '# Vanilla DFS runs in O(|V|+|E|), and the code inside the loop in accumulate_jvm_deps ends up', '# being run once for each in the graph over the course of the entire search, which means that', '# the total asymptotic runtime complexity is O(|V|+2|E|), which is still O(|V|+|E|).', 'self', '.', 'context', '.', 'build_graph', '.', 'walk_transitive_dependency_graph', '(', 'addresses', '=', '[', 't', '.', 'address', 'for', 't', 'in', 'targets', ']', ',', 'work', '=', 'accumulate_jvm_deps', ',', 'postorder', '=', 'True', ')', 'return', 'jvm_deps']
Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing. Unfiltered because the keys in the resulting map include non-JvmTargets. See the explanation in the jvm_dependency_map() docs for what this method produces. :param fully_transitive: if true, the elements of the map will be the full set of transitive JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the definition of "direct") :return: map of target -> set of JvmTarget "direct" dependencies.
['Jvm', 'dependency', 'map', 'without', 'filtering', 'out', 'non', '-', 'JvmTarget', 'keys', 'exposed', 'for', 'testing', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvm_platform_analysis.py#L39-L77
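The method relies on a post-order walk so that each dependency's JVM-target set is already filled in before its dependents are visited. The toy graph below (made-up target names, with 'resources' playing the non-JVM role) reproduces that accumulation with a plain recursive DFS; it is a sketch of the idea, not Pants' build graph API.

from collections import defaultdict

graph = {                          # hypothetical build graph: target -> direct deps
    'app': ['lib_a', 'resources'],
    'lib_a': ['lib_b'],
    'lib_b': [],
    'resources': [],
}
jvm_targets = {'app', 'lib_a', 'lib_b'}

def postorder(root, visit, seen=None):
    seen = set() if seen is None else seen
    if root in seen:
        return
    seen.add(root)
    for dep in graph[root]:
        postorder(dep, visit, seen)
    visit(root)                     # children are always visited first

jvm_deps = defaultdict(set)

def accumulate(target):
    for dep in graph[target]:
        if dep in jvm_targets:
            jvm_deps[target].add(dep)
            jvm_deps[target].update(jvm_deps[dep])   # fully transitive variant

postorder('app', accumulate)
print(sorted(jvm_deps['app']))      # ['lib_a', 'lib_b']
print(sorted(jvm_deps['lib_a']))    # ['lib_b']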
5,324
sorgerlab/indra
indra/sources/eidos/processor.py
ref_context_from_geoloc
def ref_context_from_geoloc(geoloc): """Return a RefContext object given a geoloc entry.""" text = geoloc.get('text') geoid = geoloc.get('geoID') rc = RefContext(name=text, db_refs={'GEOID': geoid}) return rc
python
def ref_context_from_geoloc(geoloc): """Return a RefContext object given a geoloc entry.""" text = geoloc.get('text') geoid = geoloc.get('geoID') rc = RefContext(name=text, db_refs={'GEOID': geoid}) return rc
['def', 'ref_context_from_geoloc', '(', 'geoloc', ')', ':', 'text', '=', 'geoloc', '.', 'get', '(', "'text'", ')', 'geoid', '=', 'geoloc', '.', 'get', '(', "'geoID'", ')', 'rc', '=', 'RefContext', '(', 'name', '=', 'text', ',', 'db_refs', '=', '{', "'GEOID'", ':', 'geoid', '}', ')', 'return', 'rc']
Return a RefContext object given a geoloc entry.
['Return', 'a', 'RefContext', 'object', 'given', 'a', 'geoloc', 'entry', '.']
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L401-L406
5,325
erdewit/ib_insync
ib_insync/ib.py
IB.accountSummary
def accountSummary(self, account: str = '') -> List[AccountValue]: """ List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name. """ if not self.wrapper.acctSummary: # loaded on demand since it takes ca. 250 ms self.reqAccountSummary() if account: return [v for v in self.wrapper.acctSummary.values() if v.account == account] else: return list(self.wrapper.acctSummary.values())
python
def accountSummary(self, account: str = '') -> List[AccountValue]: """ List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name. """ if not self.wrapper.acctSummary: # loaded on demand since it takes ca. 250 ms self.reqAccountSummary() if account: return [v for v in self.wrapper.acctSummary.values() if v.account == account] else: return list(self.wrapper.acctSummary.values())
['def', 'accountSummary', '(', 'self', ',', 'account', ':', 'str', '=', "''", ')', '->', 'List', '[', 'AccountValue', ']', ':', 'if', 'not', 'self', '.', 'wrapper', '.', 'acctSummary', ':', '# loaded on demand since it takes ca. 250 ms', 'self', '.', 'reqAccountSummary', '(', ')', 'if', 'account', ':', 'return', '[', 'v', 'for', 'v', 'in', 'self', '.', 'wrapper', '.', 'acctSummary', '.', 'values', '(', ')', 'if', 'v', '.', 'account', '==', 'account', ']', 'else', ':', 'return', 'list', '(', 'self', '.', 'wrapper', '.', 'acctSummary', '.', 'values', '(', ')', ')']
List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name.
['List', 'of', 'account', 'values', 'for', 'the', 'given', 'account', 'or', 'of', 'all', 'accounts', 'if', 'account', 'is', 'left', 'blank', '.']
train
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L343-L360
5,326
mdickinson/bigfloat
bigfloat/core.py
agm
def agm(x, y, context=None): """ Return the arithmetic geometric mean of x and y. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_agm, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
python
def agm(x, y, context=None): """ Return the arithmetic geometric mean of x and y. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_agm, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
['def', 'agm', '(', 'x', ',', 'y', ',', 'context', '=', 'None', ')', ':', 'return', '_apply_function_in_current_context', '(', 'BigFloat', ',', 'mpfr', '.', 'mpfr_agm', ',', '(', 'BigFloat', '.', '_implicit_convert', '(', 'x', ')', ',', 'BigFloat', '.', '_implicit_convert', '(', 'y', ')', ',', ')', ',', 'context', ',', ')']
Return the arithmetic geometric mean of x and y.
['Return', 'the', 'arithmetic', 'geometric', 'mean', 'of', 'x', 'and', 'y', '.']
train
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2210-L2223
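agm here simply defers to MPFR's correctly rounded mpfr_agm. For intuition, the underlying iteration is short enough to write with plain floats; the sketch below is not bigfloat's implementation and is only accurate to double precision.

import math

def agm(x, y, tol=1e-15):
    """Arithmetic-geometric mean by direct iteration on plain floats."""
    a, g = float(x), float(y)
    while abs(a - g) > tol * max(abs(a), 1.0):
        a, g = (a + g) / 2.0, math.sqrt(a * g)
    return (a + g) / 2.0

print(agm(1, 2))                        # ~1.456791031046907
print(math.pi / agm(1, math.sqrt(2)))   # ~2.6220575542, the lemniscate constant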
5,327
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/demo.py
OrgHubAnchor.close
async def close(self) -> None: """ Explicit exit. If so configured, populate cache to prove for any creds on schemata, cred defs, and rev regs marked of interest in configuration at initialization, archive cache, and purge prior cache archives. :return: current object """ LOGGER.debug('OrgHubAnchor.close >>>') archive_caches = False if self.config.get('archive-holder-prover-caches-on-close', False): archive_caches = True await self.load_cache_for_proof(False) if self.config.get('archive-verifier-caches-on-close', {}): archive_caches = True await self.load_cache_for_verification(False) if archive_caches: ArchivableCaches.archive(self.dir_cache) ArchivableCaches.purge_archives(self.dir_cache, True) # Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes # await self.wallet.close() #1.7.8 # Do not close pool independently: let relying party decide when to go on-line and off-line for path_rr_id in Tails.links(self._dir_tails): rr_id = basename(path_rr_id) try: await HolderProver._sync_revoc_for_proof(self, rr_id) except ClosedPool: LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id) LOGGER.debug('OrgHubAnchor.close <<<')
python
async def close(self) -> None: """ Explicit exit. If so configured, populate cache to prove for any creds on schemata, cred defs, and rev regs marked of interest in configuration at initialization, archive cache, and purge prior cache archives. :return: current object """ LOGGER.debug('OrgHubAnchor.close >>>') archive_caches = False if self.config.get('archive-holder-prover-caches-on-close', False): archive_caches = True await self.load_cache_for_proof(False) if self.config.get('archive-verifier-caches-on-close', {}): archive_caches = True await self.load_cache_for_verification(False) if archive_caches: ArchivableCaches.archive(self.dir_cache) ArchivableCaches.purge_archives(self.dir_cache, True) # Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes # await self.wallet.close() #1.7.8 # Do not close pool independently: let relying party decide when to go on-line and off-line for path_rr_id in Tails.links(self._dir_tails): rr_id = basename(path_rr_id) try: await HolderProver._sync_revoc_for_proof(self, rr_id) except ClosedPool: LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id) LOGGER.debug('OrgHubAnchor.close <<<')
['async', 'def', 'close', '(', 'self', ')', '->', 'None', ':', 'LOGGER', '.', 'debug', '(', "'OrgHubAnchor.close >>>'", ')', 'archive_caches', '=', 'False', 'if', 'self', '.', 'config', '.', 'get', '(', "'archive-holder-prover-caches-on-close'", ',', 'False', ')', ':', 'archive_caches', '=', 'True', 'await', 'self', '.', 'load_cache_for_proof', '(', 'False', ')', 'if', 'self', '.', 'config', '.', 'get', '(', "'archive-verifier-caches-on-close'", ',', '{', '}', ')', ':', 'archive_caches', '=', 'True', 'await', 'self', '.', 'load_cache_for_verification', '(', 'False', ')', 'if', 'archive_caches', ':', 'ArchivableCaches', '.', 'archive', '(', 'self', '.', 'dir_cache', ')', 'ArchivableCaches', '.', 'purge_archives', '(', 'self', '.', 'dir_cache', ',', 'True', ')', '# Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes', '# await self.wallet.close() #1.7.8', '# Do not close pool independently: let relying party decide when to go on-line and off-line', 'for', 'path_rr_id', 'in', 'Tails', '.', 'links', '(', 'self', '.', '_dir_tails', ')', ':', 'rr_id', '=', 'basename', '(', 'path_rr_id', ')', 'try', ':', 'await', 'HolderProver', '.', '_sync_revoc_for_proof', '(', 'self', ',', 'rr_id', ')', 'except', 'ClosedPool', ':', 'LOGGER', '.', 'warning', '(', "'OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed'", ',', 'rr_id', ')', 'LOGGER', '.', 'debug', '(', "'OrgHubAnchor.close <<<'", ')']
Explicit exit. If so configured, populate cache to prove for any creds on schemata, cred defs, and rev regs marked of interest in configuration at initialization, archive cache, and purge prior cache archives. :return: current object
['Explicit', 'exit', '.', 'If', 'so', 'configured', 'populate', 'cache', 'to', 'prove', 'for', 'any', 'creds', 'on', 'schemata', 'cred', 'defs', 'and', 'rev', 'regs', 'marked', 'of', 'interest', 'in', 'configuration', 'at', 'initialization', 'archive', 'cache', 'and', 'purge', 'prior', 'cache', 'archives', '.']
train
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/demo.py#L149-L182
5,328
saltstack/salt
salt/modules/ssh.py
get_known_host_entries
def get_known_host_entries(user, hostname, config=None, port=None, fingerprint_hash_type=None): ''' .. versionadded:: 2018.3.0 Return information about known host entries from the configfile, if any. If there are no entries for a matching hostname, return None. CLI Example: .. code-block:: bash salt '*' ssh.get_known_host_entries <user> <hostname> ''' full = _get_known_hosts_file(config=config, user=user) if isinstance(full, dict): return full ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port) cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full] lines = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False).splitlines() known_host_entries = list( _parse_openssh_output(lines, fingerprint_hash_type=fingerprint_hash_type) ) return known_host_entries if known_host_entries else None
python
def get_known_host_entries(user, hostname, config=None, port=None, fingerprint_hash_type=None): ''' .. versionadded:: 2018.3.0 Return information about known host entries from the configfile, if any. If there are no entries for a matching hostname, return None. CLI Example: .. code-block:: bash salt '*' ssh.get_known_host_entries <user> <hostname> ''' full = _get_known_hosts_file(config=config, user=user) if isinstance(full, dict): return full ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port) cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full] lines = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False).splitlines() known_host_entries = list( _parse_openssh_output(lines, fingerprint_hash_type=fingerprint_hash_type) ) return known_host_entries if known_host_entries else None
['def', 'get_known_host_entries', '(', 'user', ',', 'hostname', ',', 'config', '=', 'None', ',', 'port', '=', 'None', ',', 'fingerprint_hash_type', '=', 'None', ')', ':', 'full', '=', '_get_known_hosts_file', '(', 'config', '=', 'config', ',', 'user', '=', 'user', ')', 'if', 'isinstance', '(', 'full', ',', 'dict', ')', ':', 'return', 'full', 'ssh_hostname', '=', '_hostname_and_port_to_ssh_hostname', '(', 'hostname', ',', 'port', ')', 'cmd', '=', '[', "'ssh-keygen'", ',', "'-F'", ',', 'ssh_hostname', ',', "'-f'", ',', 'full', ']', 'lines', '=', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', 'ignore_retcode', '=', 'True', ',', 'python_shell', '=', 'False', ')', '.', 'splitlines', '(', ')', 'known_host_entries', '=', 'list', '(', '_parse_openssh_output', '(', 'lines', ',', 'fingerprint_hash_type', '=', 'fingerprint_hash_type', ')', ')', 'return', 'known_host_entries', 'if', 'known_host_entries', 'else', 'None']
.. versionadded:: 2018.3.0 Return information about known host entries from the configfile, if any. If there are no entries for a matching hostname, return None. CLI Example: .. code-block:: bash salt '*' ssh.get_known_host_entries <user> <hostname>
['..', 'versionadded', '::', '2018', '.', '3', '.', '0']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ssh.py#L842-L873
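get_known_host_entries shells out to ssh-keygen -F and then parses the output into fingerprints. Without Salt, the underlying command can be run directly as below; the parsing step is omitted, and the known_hosts path is just a common default rather than what _get_known_hosts_file would resolve.

import os
import subprocess

def known_host_lines(hostname, known_hosts_file='~/.ssh/known_hosts'):
    """Return raw `ssh-keygen -F` output lines for a host (empty list if absent)."""
    path = os.path.expanduser(known_hosts_file)
    proc = subprocess.run(['ssh-keygen', '-F', hostname, '-f', path],
                          capture_output=True, text=True)
    return proc.stdout.splitlines()

for line in known_host_lines('github.com'):
    print(line)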
5,329
10gen/mongo-orchestration
mongo_orchestration/apps/__init__.py
setup_versioned_routes
def setup_versioned_routes(routes, version=None): """Set up routes with a version prefix.""" prefix = '/' + version if version else "" for r in routes: path, method = r route(prefix + path, method, routes[r])
python
def setup_versioned_routes(routes, version=None): """Set up routes with a version prefix.""" prefix = '/' + version if version else "" for r in routes: path, method = r route(prefix + path, method, routes[r])
['def', 'setup_versioned_routes', '(', 'routes', ',', 'version', '=', 'None', ')', ':', 'prefix', '=', "'/'", '+', 'version', 'if', 'version', 'else', '""', 'for', 'r', 'in', 'routes', ':', 'path', ',', 'method', '=', 'r', 'route', '(', 'prefix', '+', 'path', ',', 'method', ',', 'routes', '[', 'r', ']', ')']
Set up routes with a version prefix.
['Set', 'up', 'routes', 'with', 'a', 'version', 'prefix', '.']
train
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/apps/__init__.py#L39-L44
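setup_versioned_routes expects a mapping of (path, method) tuples to handlers and registers each one under an optional version prefix via Bottle's module-level route(). A hypothetical route table showing how it could be wired up (handler names and port are made up, and the function body is repeated here so the snippet stands alone):

from bottle import route, run

def list_servers():
    return {'servers': []}

def create_server():
    return {'ok': True}

ROUTES = {                               # {(path, HTTP method): handler}
    ('/servers', 'GET'): list_servers,
    ('/servers', 'POST'): create_server,
}

def setup_versioned_routes(routes, version=None):
    prefix = '/' + version if version else ""
    for r in routes:
        path, method = r
        route(prefix + path, method, routes[r])

setup_versioned_routes(ROUTES, version='v1')   # registers /v1/servers
setup_versioned_routes(ROUTES)                 # plus unversioned /servers
# run(host='localhost', port=8889)             # uncomment to start serving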
5,330
derpferd/little-python
littlepython/parser.py
Parser.control
def control(self): """ control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block) """ self.eat(TokenTypes.IF) ctrl = self.expression() block = self.block() ifs = [If(ctrl, block)] else_block = Block() while self.cur_token.type == TokenTypes.ELIF: self.eat(TokenTypes.ELIF) ctrl = self.expression() block = self.block() ifs.append(If(ctrl, block)) if self.cur_token.type == TokenTypes.ELSE: self.eat(TokenTypes.ELSE) else_block = self.block() return ControlBlock(ifs, else_block)
python
def control(self): """ control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block) """ self.eat(TokenTypes.IF) ctrl = self.expression() block = self.block() ifs = [If(ctrl, block)] else_block = Block() while self.cur_token.type == TokenTypes.ELIF: self.eat(TokenTypes.ELIF) ctrl = self.expression() block = self.block() ifs.append(If(ctrl, block)) if self.cur_token.type == TokenTypes.ELSE: self.eat(TokenTypes.ELSE) else_block = self.block() return ControlBlock(ifs, else_block)
['def', 'control', '(', 'self', ')', ':', 'self', '.', 'eat', '(', 'TokenTypes', '.', 'IF', ')', 'ctrl', '=', 'self', '.', 'expression', '(', ')', 'block', '=', 'self', '.', 'block', '(', ')', 'ifs', '=', '[', 'If', '(', 'ctrl', ',', 'block', ')', ']', 'else_block', '=', 'Block', '(', ')', 'while', 'self', '.', 'cur_token', '.', 'type', '==', 'TokenTypes', '.', 'ELIF', ':', 'self', '.', 'eat', '(', 'TokenTypes', '.', 'ELIF', ')', 'ctrl', '=', 'self', '.', 'expression', '(', ')', 'block', '=', 'self', '.', 'block', '(', ')', 'ifs', '.', 'append', '(', 'If', '(', 'ctrl', ',', 'block', ')', ')', 'if', 'self', '.', 'cur_token', '.', 'type', '==', 'TokenTypes', '.', 'ELSE', ':', 'self', '.', 'eat', '(', 'TokenTypes', '.', 'ELSE', ')', 'else_block', '=', 'self', '.', 'block', '(', ')', 'return', 'ControlBlock', '(', 'ifs', ',', 'else_block', ')']
control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block)
['control', ':', 'if', 'ctrl_exp', 'block', '(', 'elif', 'ctrl_exp', 'block', ')', '*', '(', 'else', 'block', ')']
train
https://github.com/derpferd/little-python/blob/3f89c74cffb6532c12c5b40843bd8ff8605638ba/littlepython/parser.py#L99-L116
5,331
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/output_writers.py
GoogleCloudStorageConsistentOutputWriter._try_to_clean_garbage
def _try_to_clean_garbage(self, writer_spec, exclude_list=()): """Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed. """ # Try to remove garbage (if any). Note that listbucket is not strongly # consistent so something might survive. tmpl = string.Template(self._TMPFILE_PREFIX) prefix = tmpl.substitute( id=self.status.mapreduce_id, shard=self.status.shard) bucket = self._get_tmp_gcs_bucket(writer_spec) account_id = self._get_tmp_account_id(writer_spec) for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix), _account_id=account_id): if f.filename not in exclude_list: self._remove_tmpfile(f.filename, self.status.writer_spec)
python
def _try_to_clean_garbage(self, writer_spec, exclude_list=()): """Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed. """ # Try to remove garbage (if any). Note that listbucket is not strongly # consistent so something might survive. tmpl = string.Template(self._TMPFILE_PREFIX) prefix = tmpl.substitute( id=self.status.mapreduce_id, shard=self.status.shard) bucket = self._get_tmp_gcs_bucket(writer_spec) account_id = self._get_tmp_account_id(writer_spec) for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix), _account_id=account_id): if f.filename not in exclude_list: self._remove_tmpfile(f.filename, self.status.writer_spec)
['def', '_try_to_clean_garbage', '(', 'self', ',', 'writer_spec', ',', 'exclude_list', '=', '(', ')', ')', ':', '# Try to remove garbage (if any). Note that listbucket is not strongly', '# consistent so something might survive.', 'tmpl', '=', 'string', '.', 'Template', '(', 'self', '.', '_TMPFILE_PREFIX', ')', 'prefix', '=', 'tmpl', '.', 'substitute', '(', 'id', '=', 'self', '.', 'status', '.', 'mapreduce_id', ',', 'shard', '=', 'self', '.', 'status', '.', 'shard', ')', 'bucket', '=', 'self', '.', '_get_tmp_gcs_bucket', '(', 'writer_spec', ')', 'account_id', '=', 'self', '.', '_get_tmp_account_id', '(', 'writer_spec', ')', 'for', 'f', 'in', 'cloudstorage', '.', 'listbucket', '(', '"/%s/%s"', '%', '(', 'bucket', ',', 'prefix', ')', ',', '_account_id', '=', 'account_id', ')', ':', 'if', 'f', '.', 'filename', 'not', 'in', 'exclude_list', ':', 'self', '.', '_remove_tmpfile', '(', 'f', '.', 'filename', ',', 'self', '.', 'status', '.', 'writer_spec', ')']
Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed.
['Tries', 'to', 'remove', 'any', 'files', 'created', 'by', 'this', 'shard', 'that', 'aren', 't', 'needed', '.']
train
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/output_writers.py#L1014-L1032
5,332
inveniosoftware-attic/invenio-utils
invenio_utils/text.py
remove_line_breaks
def remove_line_breaks(text): """Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters. """ return unicode(text, 'utf-8').replace('\f', '').replace('\n', '') \ .replace('\r', '').replace(u'\xe2\x80\xa8', '') \ .replace(u'\xe2\x80\xa9', '').replace(u'\xc2\x85', '') \ .encode('utf-8')
python
def remove_line_breaks(text): """Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters. """ return unicode(text, 'utf-8').replace('\f', '').replace('\n', '') \ .replace('\r', '').replace(u'\xe2\x80\xa8', '') \ .replace(u'\xe2\x80\xa9', '').replace(u'\xc2\x85', '') \ .encode('utf-8')
['def', 'remove_line_breaks', '(', 'text', ')', ':', 'return', 'unicode', '(', 'text', ',', "'utf-8'", ')', '.', 'replace', '(', "'\\f'", ',', "''", ')', '.', 'replace', '(', "'\\n'", ',', "''", ')', '.', 'replace', '(', "'\\r'", ',', "''", ')', '.', 'replace', '(', "u'\\xe2\\x80\\xa8'", ',', "''", ')', '.', 'replace', '(', "u'\\xe2\\x80\\xa9'", ',', "''", ')', '.', 'replace', '(', "u'\\xc2\\x85'", ',', "''", ')', '.', 'encode', '(', "'utf-8'", ')']
Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters.
['Remove', 'line', 'breaks', 'from', 'input', '.']
train
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/text.py#L486-L495
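remove_line_breaks is Python 2 code (a unicode()/encode round-trip), and its u'\xe2\x80\xa8'-style escapes appear to spell out UTF-8 byte values rather than the code points themselves. Below is a Python 3 rendering of the same intent, assuming the goal is to drop U+2028 (line separator), U+2029 (paragraph separator) and NEL along with \f, \n and \r.

LINE_BREAKS = {
    '\f': '', '\n': '', '\r': '',
    '\u2028': '',   # LINE SEPARATOR
    '\u2029': '',   # PARAGRAPH SEPARATOR
    '\x85': '',     # NEXT LINE (NEL)
}

def remove_line_breaks(text: str) -> str:
    """Drop ASCII and Unicode line-break characters from a str (Python 3)."""
    return text.translate(str.maketrans(LINE_BREAKS))

print(repr(remove_line_breaks('a\nb\u2028c\u2029d\x85e\rf')))  # 'abcdef'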
5,333
ga4gh/ga4gh-server
ga4gh/server/datamodel/rna_quantification.py
SqliteRnaQuantificationSet.populateFromFile
def populateFromFile(self, dataUrl): """ Populates the instance variables of this RnaQuantificationSet from the specified data URL. """ self._dbFilePath = dataUrl self._db = SqliteRnaBackend(self._dbFilePath) self.addRnaQuants()
python
def populateFromFile(self, dataUrl): """ Populates the instance variables of this RnaQuantificationSet from the specified data URL. """ self._dbFilePath = dataUrl self._db = SqliteRnaBackend(self._dbFilePath) self.addRnaQuants()
['def', 'populateFromFile', '(', 'self', ',', 'dataUrl', ')', ':', 'self', '.', '_dbFilePath', '=', 'dataUrl', 'self', '.', '_db', '=', 'SqliteRnaBackend', '(', 'self', '.', '_dbFilePath', ')', 'self', '.', 'addRnaQuants', '(', ')']
Populates the instance variables of this RnaQuantificationSet from the specified data URL.
['Populates', 'the', 'instance', 'variables', 'of', 'this', 'RnaQuantificationSet', 'from', 'the', 'specified', 'data', 'URL', '.']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/rna_quantification.py#L167-L174
5,334
senaite/senaite.core
bika/lims/workflow/__init__.py
doActionFor
def doActionFor(instance, action_id, idxs=None): """Tries to perform the transition to the instance. Object is reindexed after the transition takes place, but only if succeeds. If idxs is set, only these indexes will be reindexed. Otherwise, will try to use the indexes defined in ACTIONS_TO_INDEX mapping if any. :param instance: Object to be transitioned :param action_id: transition id :param idxs: indexes to be reindexed after the transition :returns: True if the transition has been performed, together with message :rtype: tuple (bool,str) """ if not instance: return False, "" if isinstance(instance, list): # TODO Workflow . Check if this is strictly necessary # This check is here because sometimes Plone creates a list # from submitted form elements. logger.warn("Got a list of obj in doActionFor!") if len(instance) > 1: logger.warn( "doActionFor is getting an instance parameter which is a list " "with more than one item. Instance: '{}', action_id: '{}'" .format(instance, action_id) ) return doActionFor(instance=instance[0], action_id=action_id, idxs=idxs) # Since a given transition can cascade or promote to other objects, we want # to reindex all objects for which the transition succeed at once, at the # end of process. Otherwise, same object will be reindexed multiple times # unnecessarily. Also, ActionsHandlerPool ensures the same transition is not # applied twice to the same object due to cascade/promote recursions. pool = ActionHandlerPool.get_instance() if pool.succeed(instance, action_id): return False, "Transition {} for {} already done"\ .format(action_id, instance.getId()) # Return False if transition is not permitted if not isTransitionAllowed(instance, action_id): return False, "Transition {} for {} is not allowed"\ .format(action_id, instance.getId()) # Add this batch process to the queue pool.queue_pool() succeed = False message = "" workflow = getToolByName(instance, "portal_workflow") try: workflow.doActionFor(instance, action_id) succeed = True except WorkflowException as e: message = str(e) curr_state = getCurrentState(instance) clazz_name = instance.__class__.__name__ logger.warning( "Transition '{0}' not allowed: {1} '{2}' ({3})"\ .format(action_id, clazz_name, instance.getId(), curr_state)) logger.error(message) # If no indexes to reindex have been defined, try to use those defined in # the ACTIONS_TO_INDEXES mapping. Reindexing only those indexes that might # be affected by the transition boosts the overall performance!. if idxs is None: portal_type = instance.portal_type idxs = ACTIONS_TO_INDEXES.get(portal_type, {}).get(action_id, []) # Add the current object to the pool and resume pool.push(instance, action_id, succeed, idxs=idxs) pool.resume() return succeed, message
python
def doActionFor(instance, action_id, idxs=None): """Tries to perform the transition to the instance. Object is reindexed after the transition takes place, but only if succeeds. If idxs is set, only these indexes will be reindexed. Otherwise, will try to use the indexes defined in ACTIONS_TO_INDEX mapping if any. :param instance: Object to be transitioned :param action_id: transition id :param idxs: indexes to be reindexed after the transition :returns: True if the transition has been performed, together with message :rtype: tuple (bool,str) """ if not instance: return False, "" if isinstance(instance, list): # TODO Workflow . Check if this is strictly necessary # This check is here because sometimes Plone creates a list # from submitted form elements. logger.warn("Got a list of obj in doActionFor!") if len(instance) > 1: logger.warn( "doActionFor is getting an instance parameter which is a list " "with more than one item. Instance: '{}', action_id: '{}'" .format(instance, action_id) ) return doActionFor(instance=instance[0], action_id=action_id, idxs=idxs) # Since a given transition can cascade or promote to other objects, we want # to reindex all objects for which the transition succeed at once, at the # end of process. Otherwise, same object will be reindexed multiple times # unnecessarily. Also, ActionsHandlerPool ensures the same transition is not # applied twice to the same object due to cascade/promote recursions. pool = ActionHandlerPool.get_instance() if pool.succeed(instance, action_id): return False, "Transition {} for {} already done"\ .format(action_id, instance.getId()) # Return False if transition is not permitted if not isTransitionAllowed(instance, action_id): return False, "Transition {} for {} is not allowed"\ .format(action_id, instance.getId()) # Add this batch process to the queue pool.queue_pool() succeed = False message = "" workflow = getToolByName(instance, "portal_workflow") try: workflow.doActionFor(instance, action_id) succeed = True except WorkflowException as e: message = str(e) curr_state = getCurrentState(instance) clazz_name = instance.__class__.__name__ logger.warning( "Transition '{0}' not allowed: {1} '{2}' ({3})"\ .format(action_id, clazz_name, instance.getId(), curr_state)) logger.error(message) # If no indexes to reindex have been defined, try to use those defined in # the ACTIONS_TO_INDEXES mapping. Reindexing only those indexes that might # be affected by the transition boosts the overall performance!. if idxs is None: portal_type = instance.portal_type idxs = ACTIONS_TO_INDEXES.get(portal_type, {}).get(action_id, []) # Add the current object to the pool and resume pool.push(instance, action_id, succeed, idxs=idxs) pool.resume() return succeed, message
['def', 'doActionFor', '(', 'instance', ',', 'action_id', ',', 'idxs', '=', 'None', ')', ':', 'if', 'not', 'instance', ':', 'return', 'False', ',', '""', 'if', 'isinstance', '(', 'instance', ',', 'list', ')', ':', '# TODO Workflow . Check if this is strictly necessary', '# This check is here because sometimes Plone creates a list', '# from submitted form elements.', 'logger', '.', 'warn', '(', '"Got a list of obj in doActionFor!"', ')', 'if', 'len', '(', 'instance', ')', '>', '1', ':', 'logger', '.', 'warn', '(', '"doActionFor is getting an instance parameter which is a list "', '"with more than one item. Instance: \'{}\', action_id: \'{}\'"', '.', 'format', '(', 'instance', ',', 'action_id', ')', ')', 'return', 'doActionFor', '(', 'instance', '=', 'instance', '[', '0', ']', ',', 'action_id', '=', 'action_id', ',', 'idxs', '=', 'idxs', ')', '# Since a given transition can cascade or promote to other objects, we want', '# to reindex all objects for which the transition succeed at once, at the', '# end of process. Otherwise, same object will be reindexed multiple times', '# unnecessarily. Also, ActionsHandlerPool ensures the same transition is not', '# applied twice to the same object due to cascade/promote recursions.', 'pool', '=', 'ActionHandlerPool', '.', 'get_instance', '(', ')', 'if', 'pool', '.', 'succeed', '(', 'instance', ',', 'action_id', ')', ':', 'return', 'False', ',', '"Transition {} for {} already done"', '.', 'format', '(', 'action_id', ',', 'instance', '.', 'getId', '(', ')', ')', '# Return False if transition is not permitted', 'if', 'not', 'isTransitionAllowed', '(', 'instance', ',', 'action_id', ')', ':', 'return', 'False', ',', '"Transition {} for {} is not allowed"', '.', 'format', '(', 'action_id', ',', 'instance', '.', 'getId', '(', ')', ')', '# Add this batch process to the queue', 'pool', '.', 'queue_pool', '(', ')', 'succeed', '=', 'False', 'message', '=', '""', 'workflow', '=', 'getToolByName', '(', 'instance', ',', '"portal_workflow"', ')', 'try', ':', 'workflow', '.', 'doActionFor', '(', 'instance', ',', 'action_id', ')', 'succeed', '=', 'True', 'except', 'WorkflowException', 'as', 'e', ':', 'message', '=', 'str', '(', 'e', ')', 'curr_state', '=', 'getCurrentState', '(', 'instance', ')', 'clazz_name', '=', 'instance', '.', '__class__', '.', '__name__', 'logger', '.', 'warning', '(', '"Transition \'{0}\' not allowed: {1} \'{2}\' ({3})"', '.', 'format', '(', 'action_id', ',', 'clazz_name', ',', 'instance', '.', 'getId', '(', ')', ',', 'curr_state', ')', ')', 'logger', '.', 'error', '(', 'message', ')', '# If no indexes to reindex have been defined, try to use those defined in', '# the ACTIONS_TO_INDEXES mapping. Reindexing only those indexes that might', '# be affected by the transition boosts the overall performance!.', 'if', 'idxs', 'is', 'None', ':', 'portal_type', '=', 'instance', '.', 'portal_type', 'idxs', '=', 'ACTIONS_TO_INDEXES', '.', 'get', '(', 'portal_type', ',', '{', '}', ')', '.', 'get', '(', 'action_id', ',', '[', ']', ')', '# Add the current object to the pool and resume', 'pool', '.', 'push', '(', 'instance', ',', 'action_id', ',', 'succeed', ',', 'idxs', '=', 'idxs', ')', 'pool', '.', 'resume', '(', ')', 'return', 'succeed', ',', 'message']
Tries to perform the transition to the instance. Object is reindexed after the transition takes place, but only if succeeds. If idxs is set, only these indexes will be reindexed. Otherwise, will try to use the indexes defined in ACTIONS_TO_INDEX mapping if any. :param instance: Object to be transitioned :param action_id: transition id :param idxs: indexes to be reindexed after the transition :returns: True if the transition has been performed, together with message :rtype: tuple (bool,str)
['Tries', 'to', 'perform', 'the', 'transition', 'to', 'the', 'instance', '.', 'Object', 'is', 'reindexed', 'after', 'the', 'transition', 'takes', 'place', 'but', 'only', 'if', 'succeeds', '.', 'If', 'idxs', 'is', 'set', 'only', 'these', 'indexes', 'will', 'be', 'reindexed', '.', 'Otherwise', 'will', 'try', 'to', 'use', 'the', 'indexes', 'defined', 'in', 'ACTIONS_TO_INDEX', 'mapping', 'if', 'any', '.', ':', 'param', 'instance', ':', 'Object', 'to', 'be', 'transitioned', ':', 'param', 'action_id', ':', 'transition', 'id', ':', 'param', 'idxs', ':', 'indexes', 'to', 'be', 'reindexed', 'after', 'the', 'transition', ':', 'returns', ':', 'True', 'if', 'the', 'transition', 'has', 'been', 'performed', 'together', 'with', 'message', ':', 'rtype', ':', 'tuple', '(', 'bool', 'str', ')']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/__init__.py#L70-L141
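A minimal usage sketch for the doActionFor function recorded above, assuming senaite.core is installed. The analysis_request object and the "receive" transition id are illustrative assumptions, not values taken from this record; the object must be an already-created content item whose workflow actually defines that transition.

from bika.lims.workflow import doActionFor

# analysis_request is a hypothetical, already-created content object and
# "receive" is an example transition id -- both are assumptions.
succeeded, message = doActionFor(analysis_request, "receive")
if not succeeded:
    print("Transition not performed: %s" % message)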
5,335
PredixDev/predixpy
predix/security/uaa.py
UserAccountAuthentication.assert_has_permission
def assert_has_permission(self, scope_required): """ Warn that the required scope is not found in the scopes granted to the currently authenticated user. :: # The admin user should have client admin permissions uaa.assert_has_permission('admin', 'clients.admin') """ if not self.authenticated: raise ValueError("Must first authenticate()") if scope_required not in self.get_scopes(): logging.warning("Authenticated as %s" % (self.client['id'])) logging.warning("Have scopes: %s" % (str.join(',', self.get_scopes()))) logging.warning("Insufficient scope %s for operation" % (scope_required)) raise ValueError("Client does not have permission.") return True
python
def assert_has_permission(self, scope_required): """ Warn that the required scope is not found in the scopes granted to the currently authenticated user. :: # The admin user should have client admin permissions uaa.assert_has_permission('admin', 'clients.admin') """ if not self.authenticated: raise ValueError("Must first authenticate()") if scope_required not in self.get_scopes(): logging.warning("Authenticated as %s" % (self.client['id'])) logging.warning("Have scopes: %s" % (str.join(',', self.get_scopes()))) logging.warning("Insufficient scope %s for operation" % (scope_required)) raise ValueError("Client does not have permission.") return True
['def', 'assert_has_permission', '(', 'self', ',', 'scope_required', ')', ':', 'if', 'not', 'self', '.', 'authenticated', ':', 'raise', 'ValueError', '(', '"Must first authenticate()"', ')', 'if', 'scope_required', 'not', 'in', 'self', '.', 'get_scopes', '(', ')', ':', 'logging', '.', 'warning', '(', '"Authenticated as %s"', '%', '(', 'self', '.', 'client', '[', "'id'", ']', ')', ')', 'logging', '.', 'warning', '(', '"Have scopes: %s"', '%', '(', 'str', '.', 'join', '(', "','", ',', 'self', '.', 'get_scopes', '(', ')', ')', ')', ')', 'logging', '.', 'warning', '(', '"Insufficient scope %s for operation"', '%', '(', 'scope_required', ')', ')', 'raise', 'ValueError', '(', '"Client does not have permission."', ')', 'return', 'True']
Warn that the required scope is not found in the scopes granted to the currently authenticated user. :: # The admin user should have client admin permissions uaa.assert_has_permission('admin', 'clients.admin')
['Warn', 'that', 'the', 'required', 'scope', 'is', 'not', 'found', 'in', 'the', 'scopes', 'granted', 'to', 'the', 'currently', 'authenticated', 'user', '.']
train
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/uaa.py#L334-L355
5,336
michaelpb/omnic
omnic/types/resource.py
Resource.cache_makedirs
def cache_makedirs(self, subdir=None): ''' Make necessary directories to hold cache value ''' if subdir is not None: dirname = self.cache_path if subdir: dirname = os.path.join(dirname, subdir) else: dirname = os.path.dirname(self.cache_path) os.makedirs(dirname, exist_ok=True)
python
def cache_makedirs(self, subdir=None): ''' Make necessary directories to hold cache value ''' if subdir is not None: dirname = self.cache_path if subdir: dirname = os.path.join(dirname, subdir) else: dirname = os.path.dirname(self.cache_path) os.makedirs(dirname, exist_ok=True)
['def', 'cache_makedirs', '(', 'self', ',', 'subdir', '=', 'None', ')', ':', 'if', 'subdir', 'is', 'not', 'None', ':', 'dirname', '=', 'self', '.', 'cache_path', 'if', 'subdir', ':', 'dirname', '=', 'os', '.', 'path', '.', 'join', '(', 'dirname', ',', 'subdir', ')', 'else', ':', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'self', '.', 'cache_path', ')', 'os', '.', 'makedirs', '(', 'dirname', ',', 'exist_ok', '=', 'True', ')']
Make necessary directories to hold cache value
['Make', 'necessary', 'directories', 'to', 'hold', 'cache', 'value']
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/types/resource.py#L63-L73
5,337
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_date.py
ComparerDate.extract
def extract(self, item, list_article_candidate): """Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date """ list_publish_date = [] for article_candidate in list_article_candidate: if article_candidate.publish_date != None: list_publish_date.append((article_candidate.publish_date, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_publish_date) == 0: return None # If there are more options than one, return the result from date_extractor. list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"] if len(list_date_extractor) == 0: # If there is no date extracted by date_extractor, return the first result of list_publish_date. return list_publish_date[0][0] else: return list_date_extractor[0][0]
python
def extract(self, item, list_article_candidate): """Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date """ list_publish_date = [] for article_candidate in list_article_candidate: if article_candidate.publish_date != None: list_publish_date.append((article_candidate.publish_date, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_publish_date) == 0: return None # If there are more options than one, return the result from date_extractor. list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"] if len(list_date_extractor) == 0: # If there is no date extracted by date_extractor, return the first result of list_publish_date. return list_publish_date[0][0] else: return list_date_extractor[0][0]
['def', 'extract', '(', 'self', ',', 'item', ',', 'list_article_candidate', ')', ':', 'list_publish_date', '=', '[', ']', 'for', 'article_candidate', 'in', 'list_article_candidate', ':', 'if', 'article_candidate', '.', 'publish_date', '!=', 'None', ':', 'list_publish_date', '.', 'append', '(', '(', 'article_candidate', '.', 'publish_date', ',', 'article_candidate', '.', 'extractor', ')', ')', '# If there is no value in the list, return None.', 'if', 'len', '(', 'list_publish_date', ')', '==', '0', ':', 'return', 'None', '# If there are more options than one, return the result from date_extractor.', 'list_date_extractor', '=', '[', 'x', 'for', 'x', 'in', 'list_publish_date', 'if', 'x', '[', '1', ']', '==', '"date_extractor"', ']', 'if', 'len', '(', 'list_date_extractor', ')', '==', '0', ':', '# If there is no date extracted by date_extractor, return the first result of list_publish_date.', 'return', 'list_publish_date', '[', '0', ']', '[', '0', ']', 'else', ':', 'return', 'list_date_extractor', '[', '0', ']', '[', '0', ']']
Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date
['Compares', 'the', 'extracted', 'publish', 'dates', '.']
train
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_date.py#L4-L28
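A self-contained sketch of the selection rule implemented by ComparerDate.extract above: prefer a date produced by the "date_extractor", otherwise fall back to the first candidate. The candidate tuples below are made-up example data, not values produced by news-please.

# (publish_date, extractor_name) pairs -- illustrative data only.
candidates = [("2019-01-02", "newspaper"), ("2019-01-03", "date_extractor")]
from_date_extractor = [c for c in candidates if c[1] == "date_extractor"]
best = (from_date_extractor or candidates)[0][0]
print(best)  # -> 2019-01-03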
5,338
noxdafox/clipspy
clips/agenda.py
Agenda.agenda_changed
def agenda_changed(self): """True if any rule activation changes have occurred.""" value = bool(lib.EnvGetAgendaChanged(self._env)) lib.EnvSetAgendaChanged(self._env, int(False)) return value
python
def agenda_changed(self): """True if any rule activation changes have occurred.""" value = bool(lib.EnvGetAgendaChanged(self._env)) lib.EnvSetAgendaChanged(self._env, int(False)) return value
['def', 'agenda_changed', '(', 'self', ')', ':', 'value', '=', 'bool', '(', 'lib', '.', 'EnvGetAgendaChanged', '(', 'self', '.', '_env', ')', ')', 'lib', '.', 'EnvSetAgendaChanged', '(', 'self', '.', '_env', ',', 'int', '(', 'False', ')', ')', 'return', 'value']
True if any rule activation changes have occurred.
['True', 'if', 'any', 'rule', 'activation', 'changes', 'have', 'occurred', '.']
train
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/agenda.py#L64-L69
5,339
cloudsigma/cgroupspy
cgroupspy/nodes.py
Node._get_controller_type
def _get_controller_type(self): """Returns the current node's controller type""" if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS: return self.name elif self.parent: return self.parent.controller_type else: return None
python
def _get_controller_type(self): """Returns the current node's controller type""" if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS: return self.name elif self.parent: return self.parent.controller_type else: return None
['def', '_get_controller_type', '(', 'self', ')', ':', 'if', 'self', '.', 'node_type', '==', 'self', '.', 'NODE_CONTROLLER_ROOT', 'and', 'self', '.', 'name', 'in', 'self', '.', 'CONTROLLERS', ':', 'return', 'self', '.', 'name', 'elif', 'self', '.', 'parent', ':', 'return', 'self', '.', 'parent', '.', 'controller_type', 'else', ':', 'return', 'None']
Returns the current node's controller type
['Returns', 'the', 'current', 'node', 's', 'controller', 'type']
train
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L120-L128
5,340
Erotemic/utool
utool/util_path.py
ancestor_paths
def ancestor_paths(start=None, limit={}): """ All paths above you """ import utool as ut limit = ut.ensure_iterable(limit) limit = {expanduser(p) for p in limit}.union(set(limit)) if start is None: start = os.getcwd() path = start prev = None while path != prev and prev not in limit: yield path prev = path path = dirname(path)
python
def ancestor_paths(start=None, limit={}): """ All paths above you """ import utool as ut limit = ut.ensure_iterable(limit) limit = {expanduser(p) for p in limit}.union(set(limit)) if start is None: start = os.getcwd() path = start prev = None while path != prev and prev not in limit: yield path prev = path path = dirname(path)
['def', 'ancestor_paths', '(', 'start', '=', 'None', ',', 'limit', '=', '{', '}', ')', ':', 'import', 'utool', 'as', 'ut', 'limit', '=', 'ut', '.', 'ensure_iterable', '(', 'limit', ')', 'limit', '=', '{', 'expanduser', '(', 'p', ')', 'for', 'p', 'in', 'limit', '}', '.', 'union', '(', 'set', '(', 'limit', ')', ')', 'if', 'start', 'is', 'None', ':', 'start', '=', 'os', '.', 'getcwd', '(', ')', 'path', '=', 'start', 'prev', '=', 'None', 'while', 'path', '!=', 'prev', 'and', 'prev', 'not', 'in', 'limit', ':', 'yield', 'path', 'prev', '=', 'path', 'path', '=', 'dirname', '(', 'path', ')']
All paths above you
['All', 'paths', 'above', 'you']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2436-L2450
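A usage sketch for ancestor_paths above, assuming utool is installed and a POSIX path. The start path and the limit set are made-up examples; the generator yields the start directory and each parent in turn, stopping once the previously yielded path is a member of limit.

from utool.util_path import ancestor_paths

# '/home/user/project/src' and the limit set are illustrative assumptions.
for p in ancestor_paths('/home/user/project/src', limit={'/home'}):
    print(p)
# expected: .../src, .../project, /home/user, /home -- the limit directory is
# still yielded, because the stop check inspects the previously yielded path.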
5,341
romanorac/discomll
discomll/regression/locally_weighted_linear_regression.py
fit_predict
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0, save_results=True, show=False): from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator from disco.core import Disco """ training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job. """ try: tau = float(tau) if tau <= 0: raise Exception("Parameter tau should be >= 0.") except ValueError: raise Exception("Parameter tau should be numerical.") if fitting_data.params["id_index"] == -1: raise Exception("Predict data should have id_index set.") job = Job(worker=Worker(save_results=save_results)) job.pipeline = [ ("split", Stage("map", input_chain=fitting_data.params["input_chain"], init=simple_init, process=map_predict))] job.params = fitting_data.params job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"]) samples = {} results = [] tau = float(2 * tau ** 2) # calculate tau once counter = 0 for test_id, x in result_iterator(job.wait(show=show)): if samples_per_job == 0: # calculate number of samples per job if len(x) <= 100: # if there is less than 100 attributes samples_per_job = 100 # 100 samples is max per on job else: # there is more than 100 attributes samples_per_job = len(x) * -25 / 900. + 53 # linear function samples[test_id] = x if counter == samples_per_job: results.append(_fit_predict(training_data, samples, tau, save_results, show)) counter = 0 samples = {} counter += 1 if len(samples) > 0: # if there is some samples left in the the dictionary results.append(_fit_predict(training_data, samples, tau, save_results, show)) # merge results of every iteration into a single tag ddfs = Disco().ddfs ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results]) return ["tag://" + job.name]
python
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0, save_results=True, show=False): from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator from disco.core import Disco """ training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job. """ try: tau = float(tau) if tau <= 0: raise Exception("Parameter tau should be >= 0.") except ValueError: raise Exception("Parameter tau should be numerical.") if fitting_data.params["id_index"] == -1: raise Exception("Predict data should have id_index set.") job = Job(worker=Worker(save_results=save_results)) job.pipeline = [ ("split", Stage("map", input_chain=fitting_data.params["input_chain"], init=simple_init, process=map_predict))] job.params = fitting_data.params job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"]) samples = {} results = [] tau = float(2 * tau ** 2) # calculate tau once counter = 0 for test_id, x in result_iterator(job.wait(show=show)): if samples_per_job == 0: # calculate number of samples per job if len(x) <= 100: # if there is less than 100 attributes samples_per_job = 100 # 100 samples is max per on job else: # there is more than 100 attributes samples_per_job = len(x) * -25 / 900. + 53 # linear function samples[test_id] = x if counter == samples_per_job: results.append(_fit_predict(training_data, samples, tau, save_results, show)) counter = 0 samples = {} counter += 1 if len(samples) > 0: # if there is some samples left in the the dictionary results.append(_fit_predict(training_data, samples, tau, save_results, show)) # merge results of every iteration into a single tag ddfs = Disco().ddfs ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results]) return ["tag://" + job.name]
['def', 'fit_predict', '(', 'training_data', ',', 'fitting_data', ',', 'tau', '=', '1', ',', 'samples_per_job', '=', '0', ',', 'save_results', '=', 'True', ',', 'show', '=', 'False', ')', ':', 'from', 'disco', '.', 'worker', '.', 'pipeline', '.', 'worker', 'import', 'Worker', ',', 'Stage', 'from', 'disco', '.', 'core', 'import', 'Job', ',', 'result_iterator', 'from', 'disco', '.', 'core', 'import', 'Disco', 'try', ':', 'tau', '=', 'float', '(', 'tau', ')', 'if', 'tau', '<=', '0', ':', 'raise', 'Exception', '(', '"Parameter tau should be >= 0."', ')', 'except', 'ValueError', ':', 'raise', 'Exception', '(', '"Parameter tau should be numerical."', ')', 'if', 'fitting_data', '.', 'params', '[', '"id_index"', ']', '==', '-', '1', ':', 'raise', 'Exception', '(', '"Predict data should have id_index set."', ')', 'job', '=', 'Job', '(', 'worker', '=', 'Worker', '(', 'save_results', '=', 'save_results', ')', ')', 'job', '.', 'pipeline', '=', '[', '(', '"split"', ',', 'Stage', '(', '"map"', ',', 'input_chain', '=', 'fitting_data', '.', 'params', '[', '"input_chain"', ']', ',', 'init', '=', 'simple_init', ',', 'process', '=', 'map_predict', ')', ')', ']', 'job', '.', 'params', '=', 'fitting_data', '.', 'params', 'job', '.', 'run', '(', 'name', '=', '"lwlr_read_data"', ',', 'input', '=', 'fitting_data', '.', 'params', '[', '"data_tag"', ']', ')', 'samples', '=', '{', '}', 'results', '=', '[', ']', 'tau', '=', 'float', '(', '2', '*', 'tau', '**', '2', ')', '# calculate tau once', 'counter', '=', '0', 'for', 'test_id', ',', 'x', 'in', 'result_iterator', '(', 'job', '.', 'wait', '(', 'show', '=', 'show', ')', ')', ':', 'if', 'samples_per_job', '==', '0', ':', '# calculate number of samples per job', 'if', 'len', '(', 'x', ')', '<=', '100', ':', '# if there is less than 100 attributes', 'samples_per_job', '=', '100', '# 100 samples is max per on job', 'else', ':', '# there is more than 100 attributes', 'samples_per_job', '=', 'len', '(', 'x', ')', '*', '-', '25', '/', '900.', '+', '53', '# linear function', 'samples', '[', 'test_id', ']', '=', 'x', 'if', 'counter', '==', 'samples_per_job', ':', 'results', '.', 'append', '(', '_fit_predict', '(', 'training_data', ',', 'samples', ',', 'tau', ',', 'save_results', ',', 'show', ')', ')', 'counter', '=', '0', 'samples', '=', '{', '}', 'counter', '+=', '1', 'if', 'len', '(', 'samples', ')', '>', '0', ':', '# if there is some samples left in the the dictionary', 'results', '.', 'append', '(', '_fit_predict', '(', 'training_data', ',', 'samples', ',', 'tau', ',', 'save_results', ',', 'show', ')', ')', '# merge results of every iteration into a single tag', 'ddfs', '=', 'Disco', '(', ')', '.', 'ddfs', 'ddfs', '.', 'tag', '(', 'job', '.', 'name', ',', '[', '[', 'list', '(', 'ddfs', '.', 'blobs', '(', 'tag', ')', ')', '[', '0', ']', '[', '0', ']', ']', 'for', 'tag', 'in', 'results', ']', ')', 'return', '[', '"tag://"', '+', 'job', '.', 'name', ']']
training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job.
['training_data', '-', 'training', 'samples', 'fitting_data', '-', 'dataset', 'to', 'be', 'fitted', 'to', 'training', 'data', '.', 'tau', '-', 'controls', 'how', 'quickly', 'the', 'weight', 'of', 'a', 'training', 'sample', 'falls', 'off', 'with', 'distance', 'of', 'its', 'x', '(', 'i', ')', 'from', 'the', 'query', 'point', 'x', '.', 'samples_per_job', '-', 'define', 'a', 'number', 'of', 'samples', 'that', 'will', 'be', 'processed', 'in', 'single', 'mapreduce', 'job', '.', 'If', '0', 'algorithm', 'will', 'calculate', 'number', 'of', 'samples', 'per', 'job', '.']
train
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/regression/locally_weighted_linear_regression.py#L83-L139
5,342
spyder-ide/spyder
spyder/plugins/editor/plugin.py
Editor.new
def new(self, fname=None, editorstack=None, text=None): """ Create a new file - Untitled fname=None --> fname will be 'untitledXX.py' but do not create file fname=<basestring> --> create file """ # If no text is provided, create default content empty = False try: if text is None: default_content = True text, enc = encoding.read(self.TEMPLATE_PATH) enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-', text) if enc_match: enc = enc_match.group(1) # Initialize template variables # Windows username = encoding.to_unicode_from_fs( os.environ.get('USERNAME', '')) # Linux, Mac OS X if not username: username = encoding.to_unicode_from_fs( os.environ.get('USER', '-')) VARS = { 'date': time.ctime(), 'username': username, } try: text = text % VARS except Exception: pass else: default_content = False enc = encoding.read(self.TEMPLATE_PATH)[1] except (IOError, OSError): text = '' enc = 'utf-8' default_content = True empty = True create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n) # Creating editor widget if editorstack is None: current_es = self.get_current_editorstack() else: current_es = editorstack created_from_here = fname is None if created_from_here: while True: fname = create_fname(self.untitled_num) self.untitled_num += 1 if not osp.isfile(fname): break basedir = getcwd_or_home() if self.main.projects.get_active_project() is not None: basedir = self.main.projects.get_active_project_path() else: c_fname = self.get_current_filename() if c_fname is not None and c_fname != self.TEMPFILE_PATH: basedir = osp.dirname(c_fname) fname = osp.abspath(osp.join(basedir, fname)) else: # QString when triggered by a Qt signal fname = osp.abspath(to_text_string(fname)) index = current_es.has_filename(fname) if index is not None and not current_es.close_file(index): return # Creating the editor widget in the first editorstack (the one that # can't be destroyed), then cloning this editor widget in all other # editorstacks: finfo = self.editorstacks[0].new(fname, enc, text, default_content, empty) finfo.path = self.main.get_spyder_pythonpath() self._clone_file_everywhere(finfo) current_editor = current_es.set_current_filename(finfo.filename) self.register_widget_shortcuts(current_editor) if not created_from_here: self.save(force=True)
python
def new(self, fname=None, editorstack=None, text=None): """ Create a new file - Untitled fname=None --> fname will be 'untitledXX.py' but do not create file fname=<basestring> --> create file """ # If no text is provided, create default content empty = False try: if text is None: default_content = True text, enc = encoding.read(self.TEMPLATE_PATH) enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-', text) if enc_match: enc = enc_match.group(1) # Initialize template variables # Windows username = encoding.to_unicode_from_fs( os.environ.get('USERNAME', '')) # Linux, Mac OS X if not username: username = encoding.to_unicode_from_fs( os.environ.get('USER', '-')) VARS = { 'date': time.ctime(), 'username': username, } try: text = text % VARS except Exception: pass else: default_content = False enc = encoding.read(self.TEMPLATE_PATH)[1] except (IOError, OSError): text = '' enc = 'utf-8' default_content = True empty = True create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n) # Creating editor widget if editorstack is None: current_es = self.get_current_editorstack() else: current_es = editorstack created_from_here = fname is None if created_from_here: while True: fname = create_fname(self.untitled_num) self.untitled_num += 1 if not osp.isfile(fname): break basedir = getcwd_or_home() if self.main.projects.get_active_project() is not None: basedir = self.main.projects.get_active_project_path() else: c_fname = self.get_current_filename() if c_fname is not None and c_fname != self.TEMPFILE_PATH: basedir = osp.dirname(c_fname) fname = osp.abspath(osp.join(basedir, fname)) else: # QString when triggered by a Qt signal fname = osp.abspath(to_text_string(fname)) index = current_es.has_filename(fname) if index is not None and not current_es.close_file(index): return # Creating the editor widget in the first editorstack (the one that # can't be destroyed), then cloning this editor widget in all other # editorstacks: finfo = self.editorstacks[0].new(fname, enc, text, default_content, empty) finfo.path = self.main.get_spyder_pythonpath() self._clone_file_everywhere(finfo) current_editor = current_es.set_current_filename(finfo.filename) self.register_widget_shortcuts(current_editor) if not created_from_here: self.save(force=True)
['def', 'new', '(', 'self', ',', 'fname', '=', 'None', ',', 'editorstack', '=', 'None', ',', 'text', '=', 'None', ')', ':', '# If no text is provided, create default content\r', 'empty', '=', 'False', 'try', ':', 'if', 'text', 'is', 'None', ':', 'default_content', '=', 'True', 'text', ',', 'enc', '=', 'encoding', '.', 'read', '(', 'self', '.', 'TEMPLATE_PATH', ')', 'enc_match', '=', 're', '.', 'search', '(', "r'-*- coding: ?([a-z0-9A-Z\\-]*) -*-'", ',', 'text', ')', 'if', 'enc_match', ':', 'enc', '=', 'enc_match', '.', 'group', '(', '1', ')', '# Initialize template variables\r', '# Windows\r', 'username', '=', 'encoding', '.', 'to_unicode_from_fs', '(', 'os', '.', 'environ', '.', 'get', '(', "'USERNAME'", ',', "''", ')', ')', '# Linux, Mac OS X\r', 'if', 'not', 'username', ':', 'username', '=', 'encoding', '.', 'to_unicode_from_fs', '(', 'os', '.', 'environ', '.', 'get', '(', "'USER'", ',', "'-'", ')', ')', 'VARS', '=', '{', "'date'", ':', 'time', '.', 'ctime', '(', ')', ',', "'username'", ':', 'username', ',', '}', 'try', ':', 'text', '=', 'text', '%', 'VARS', 'except', 'Exception', ':', 'pass', 'else', ':', 'default_content', '=', 'False', 'enc', '=', 'encoding', '.', 'read', '(', 'self', '.', 'TEMPLATE_PATH', ')', '[', '1', ']', 'except', '(', 'IOError', ',', 'OSError', ')', ':', 'text', '=', "''", 'enc', '=', "'utf-8'", 'default_content', '=', 'True', 'empty', '=', 'True', 'create_fname', '=', 'lambda', 'n', ':', 'to_text_string', '(', '_', '(', '"untitled"', ')', ')', '+', '(', '"%d.py"', '%', 'n', ')', '# Creating editor widget\r', 'if', 'editorstack', 'is', 'None', ':', 'current_es', '=', 'self', '.', 'get_current_editorstack', '(', ')', 'else', ':', 'current_es', '=', 'editorstack', 'created_from_here', '=', 'fname', 'is', 'None', 'if', 'created_from_here', ':', 'while', 'True', ':', 'fname', '=', 'create_fname', '(', 'self', '.', 'untitled_num', ')', 'self', '.', 'untitled_num', '+=', '1', 'if', 'not', 'osp', '.', 'isfile', '(', 'fname', ')', ':', 'break', 'basedir', '=', 'getcwd_or_home', '(', ')', 'if', 'self', '.', 'main', '.', 'projects', '.', 'get_active_project', '(', ')', 'is', 'not', 'None', ':', 'basedir', '=', 'self', '.', 'main', '.', 'projects', '.', 'get_active_project_path', '(', ')', 'else', ':', 'c_fname', '=', 'self', '.', 'get_current_filename', '(', ')', 'if', 'c_fname', 'is', 'not', 'None', 'and', 'c_fname', '!=', 'self', '.', 'TEMPFILE_PATH', ':', 'basedir', '=', 'osp', '.', 'dirname', '(', 'c_fname', ')', 'fname', '=', 'osp', '.', 'abspath', '(', 'osp', '.', 'join', '(', 'basedir', ',', 'fname', ')', ')', 'else', ':', '# QString when triggered by a Qt signal\r', 'fname', '=', 'osp', '.', 'abspath', '(', 'to_text_string', '(', 'fname', ')', ')', 'index', '=', 'current_es', '.', 'has_filename', '(', 'fname', ')', 'if', 'index', 'is', 'not', 'None', 'and', 'not', 'current_es', '.', 'close_file', '(', 'index', ')', ':', 'return', '# Creating the editor widget in the first editorstack (the one that\r', "# can't be destroyed), then cloning this editor widget in all other\r", '# editorstacks:\r', 'finfo', '=', 'self', '.', 'editorstacks', '[', '0', ']', '.', 'new', '(', 'fname', ',', 'enc', ',', 'text', ',', 'default_content', ',', 'empty', ')', 'finfo', '.', 'path', '=', 'self', '.', 'main', '.', 'get_spyder_pythonpath', '(', ')', 'self', '.', '_clone_file_everywhere', '(', 'finfo', ')', 'current_editor', '=', 'current_es', '.', 'set_current_filename', '(', 'finfo', '.', 'filename', ')', 'self', '.', 'register_widget_shortcuts', '(', 'current_editor', ')', 'if', 
'not', 'created_from_here', ':', 'self', '.', 'save', '(', 'force', '=', 'True', ')']
Create a new file - Untitled fname=None --> fname will be 'untitledXX.py' but do not create file fname=<basestring> --> create file
['Create', 'a', 'new', 'file', '-', 'Untitled', 'fname', '=', 'None', '--', '>', 'fname', 'will', 'be', 'untitledXX', '.', 'py', 'but', 'do', 'not', 'create', 'file', 'fname', '=', '<basestring', '>', '--', '>', 'create', 'file']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L1565-L1646
5,343
guaix-ucm/numina
numina/array/interpolation.py
SteffenInterpolator._poly_eval_0
def _poly_eval_0(self, u, ids): """Evaluate internal polynomial.""" return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids]
python
def _poly_eval_0(self, u, ids): """Evaluate internal polynomial.""" return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids]
['def', '_poly_eval_0', '(', 'self', ',', 'u', ',', 'ids', ')', ':', 'return', 'u', '*', '(', 'u', '*', '(', 'self', '.', '_a', '[', 'ids', ']', '*', 'u', '+', 'self', '.', '_b', '[', 'ids', ']', ')', '+', 'self', '.', '_c', '[', 'ids', ']', ')', '+', 'self', '.', '_d', '[', 'ids', ']']
Evaluate internal polynomial.
['Evaluate', 'internal', 'polynomial', '.']
train
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L218-L220
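The expression in _poly_eval_0 above is the cubic a*u**3 + b*u**2 + c*u + d written in Horner form. A small self-contained check of that equivalence, with made-up coefficients:

import numpy as np

a, b, c, d = 2.0, -1.0, 0.5, 3.0          # illustrative coefficients
u = np.linspace(-1.0, 1.0, 5)
horner = u * (u * (a * u + b) + c) + d    # form used by _poly_eval_0
direct = a * u**3 + b * u**2 + c * u + d
print(np.allclose(horner, direct))        # -> True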
5,344
cggh/scikit-allel
allel/model/ndarray.py
FeatureTable.from_gff3
def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', dtype=None): """Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable """ a = gff3_to_recarray(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype) if a is None: return None else: return FeatureTable(a, copy=False)
python
def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', dtype=None): """Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable """ a = gff3_to_recarray(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype) if a is None: return None else: return FeatureTable(a, copy=False)
['def', 'from_gff3', '(', 'path', ',', 'attributes', '=', 'None', ',', 'region', '=', 'None', ',', 'score_fill', '=', '-', '1', ',', 'phase_fill', '=', '-', '1', ',', 'attributes_fill', '=', "'.'", ',', 'dtype', '=', 'None', ')', ':', 'a', '=', 'gff3_to_recarray', '(', 'path', ',', 'attributes', '=', 'attributes', ',', 'region', '=', 'region', ',', 'score_fill', '=', 'score_fill', ',', 'phase_fill', '=', 'phase_fill', ',', 'attributes_fill', '=', 'attributes_fill', ',', 'dtype', '=', 'dtype', ')', 'if', 'a', 'is', 'None', ':', 'return', 'None', 'else', ':', 'return', 'FeatureTable', '(', 'a', ',', 'copy', '=', 'False', ')']
Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable
['Read', 'a', 'feature', 'table', 'from', 'a', 'GFF3', 'format', 'file', '.']
train
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4760-L4795
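A hedged usage sketch for FeatureTable.from_gff3 above, assuming scikit-allel is installed. The file name 'annotations.gff3' and the attribute names are placeholders, not files or fields taken from this record.

import allel

# File name and attribute list are illustrative assumptions.
ft = allel.FeatureTable.from_gff3('annotations.gff3', attributes=['ID', 'Parent'])
if ft is not None:
    print(len(ft))  # number of features parsed from the file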
5,345
scopus-api/scopus
scopus/abstract_retrieval.py
AbstractRetrieval.confdate
def confdate(self): """Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD). """ date = self._confevent.get('confdate', {}) if len(date) > 0: start = {k: int(v) for k, v in date['startdate'].items()} end = {k: int(v) for k, v in date['enddate'].items()} return ((start['@year'], start['@month'], start['@day']), (end['@year'], end['@month'], end['@day'])) else: return ((None, None, None), (None, None, None))
python
def confdate(self): """Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD). """ date = self._confevent.get('confdate', {}) if len(date) > 0: start = {k: int(v) for k, v in date['startdate'].items()} end = {k: int(v) for k, v in date['enddate'].items()} return ((start['@year'], start['@month'], start['@day']), (end['@year'], end['@month'], end['@day'])) else: return ((None, None, None), (None, None, None))
['def', 'confdate', '(', 'self', ')', ':', 'date', '=', 'self', '.', '_confevent', '.', 'get', '(', "'confdate'", ',', '{', '}', ')', 'if', 'len', '(', 'date', ')', '>', '0', ':', 'start', '=', '{', 'k', ':', 'int', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'date', '[', "'startdate'", ']', '.', 'items', '(', ')', '}', 'end', '=', '{', 'k', ':', 'int', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'date', '[', "'enddate'", ']', '.', 'items', '(', ')', '}', 'return', '(', '(', 'start', '[', "'@year'", ']', ',', 'start', '[', "'@month'", ']', ',', 'start', '[', "'@day'", ']', ')', ',', '(', 'end', '[', "'@year'", ']', ',', 'end', '[', "'@month'", ']', ',', 'end', '[', "'@day'", ']', ')', ')', 'else', ':', 'return', '(', '(', 'None', ',', 'None', ',', 'None', ')', ',', '(', 'None', ',', 'None', ',', 'None', ')', ')']
Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD).
['Date', 'range', 'of', 'the', 'conference', 'the', 'abstract', 'belongs', 'to', 'represented', 'by', 'two', 'tuples', 'in', 'the', 'form', '(', 'YYYY', 'MM', 'DD', ')', '.']
train
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/abstract_retrieval.py#L153-L164
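A self-contained sketch of the date handling inside confdate above: the '@year'/'@month'/'@day' entries arrive as strings and are converted to ints before the two (YYYY, MM, DD) tuples are built. The dictionary below is made-up example data shaped like that payload, not an actual Scopus response.

# Illustrative stand-in for self._confevent.get('confdate', {}).
date = {'startdate': {'@year': '2018', '@month': '9', '@day': '3'},
        'enddate':   {'@year': '2018', '@month': '9', '@day': '5'}}
start = {k: int(v) for k, v in date['startdate'].items()}
end = {k: int(v) for k, v in date['enddate'].items()}
print(((start['@year'], start['@month'], start['@day']),
       (end['@year'], end['@month'], end['@day'])))
# -> ((2018, 9, 3), (2018, 9, 5))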
5,346
Roastero/freshroastsr700
freshroastsr700/__init__.py
freshroastsr700.heat_setting
def heat_setting(self, value): """Verifies that the heat setting is between 0 and 3.""" if value not in range(0, 4): raise exceptions.RoasterValueError self._heat_setting.value = value
python
def heat_setting(self, value): """Verifies that the heat setting is between 0 and 3.""" if value not in range(0, 4): raise exceptions.RoasterValueError self._heat_setting.value = value
['def', 'heat_setting', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'not', 'in', 'range', '(', '0', ',', '4', ')', ':', 'raise', 'exceptions', '.', 'RoasterValueError', 'self', '.', '_heat_setting', '.', 'value', '=', 'value']
Verifies that the heat setting is between 0 and 3.
['Verifies', 'that', 'the', 'heat', 'setting', 'is', 'between', '0', 'and', '3', '.']
train
https://github.com/Roastero/freshroastsr700/blob/49cf4961444c0f56d051d5ac5088ace480b54f02/freshroastsr700/__init__.py#L259-L264
5,347
PythonCharmers/python-future
src/future/backports/email/encoders.py
encode_7or8bit
def encode_7or8bit(msg): """Set the Content-Transfer-Encoding header to 7bit or 8bit.""" orig = msg.get_payload() if orig is None: # There's no payload. For backwards compatibility we use 7bit msg['Content-Transfer-Encoding'] = '7bit' return # We play a trick to make this go fast. If encoding/decode to ASCII # succeeds, we know the data must be 7bit, otherwise treat it as 8bit. try: if isinstance(orig, str): orig.encode('ascii') else: orig.decode('ascii') except UnicodeError: charset = msg.get_charset() output_cset = charset and charset.output_charset # iso-2022-* is non-ASCII but encodes to a 7-bit representation if output_cset and output_cset.lower().startswith('iso-2022-'): msg['Content-Transfer-Encoding'] = '7bit' else: msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit' if not isinstance(orig, str): msg.set_payload(orig.decode('ascii', 'surrogateescape'))
python
def encode_7or8bit(msg): """Set the Content-Transfer-Encoding header to 7bit or 8bit.""" orig = msg.get_payload() if orig is None: # There's no payload. For backwards compatibility we use 7bit msg['Content-Transfer-Encoding'] = '7bit' return # We play a trick to make this go fast. If encoding/decode to ASCII # succeeds, we know the data must be 7bit, otherwise treat it as 8bit. try: if isinstance(orig, str): orig.encode('ascii') else: orig.decode('ascii') except UnicodeError: charset = msg.get_charset() output_cset = charset and charset.output_charset # iso-2022-* is non-ASCII but encodes to a 7-bit representation if output_cset and output_cset.lower().startswith('iso-2022-'): msg['Content-Transfer-Encoding'] = '7bit' else: msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit' if not isinstance(orig, str): msg.set_payload(orig.decode('ascii', 'surrogateescape'))
['def', 'encode_7or8bit', '(', 'msg', ')', ':', 'orig', '=', 'msg', '.', 'get_payload', '(', ')', 'if', 'orig', 'is', 'None', ':', "# There's no payload. For backwards compatibility we use 7bit", 'msg', '[', "'Content-Transfer-Encoding'", ']', '=', "'7bit'", 'return', '# We play a trick to make this go fast. If encoding/decode to ASCII', '# succeeds, we know the data must be 7bit, otherwise treat it as 8bit.', 'try', ':', 'if', 'isinstance', '(', 'orig', ',', 'str', ')', ':', 'orig', '.', 'encode', '(', "'ascii'", ')', 'else', ':', 'orig', '.', 'decode', '(', "'ascii'", ')', 'except', 'UnicodeError', ':', 'charset', '=', 'msg', '.', 'get_charset', '(', ')', 'output_cset', '=', 'charset', 'and', 'charset', '.', 'output_charset', '# iso-2022-* is non-ASCII but encodes to a 7-bit representation', 'if', 'output_cset', 'and', 'output_cset', '.', 'lower', '(', ')', '.', 'startswith', '(', "'iso-2022-'", ')', ':', 'msg', '[', "'Content-Transfer-Encoding'", ']', '=', "'7bit'", 'else', ':', 'msg', '[', "'Content-Transfer-Encoding'", ']', '=', "'8bit'", 'else', ':', 'msg', '[', "'Content-Transfer-Encoding'", ']', '=', "'7bit'", 'if', 'not', 'isinstance', '(', 'orig', ',', 'str', ')', ':', 'msg', '.', 'set_payload', '(', 'orig', '.', 'decode', '(', "'ascii'", ',', "'surrogateescape'", ')', ')']
Set the Content-Transfer-Encoding header to 7bit or 8bit.
['Set', 'the', 'Content', '-', 'Transfer', '-', 'Encoding', 'header', 'to', '7bit', 'or', '8bit', '.']
train
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/encoders.py#L55-L80
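A minimal usage sketch for encode_7or8bit above. It uses the standard-library email.encoders counterpart of this backported function, which behaves the same way for the simple ASCII case shown; the message body is made-up.

from email.message import Message
from email.encoders import encode_7or8bit  # stdlib counterpart of the backport above

msg = Message()
msg.set_payload("plain ascii body")        # illustrative payload
encode_7or8bit(msg)
print(msg["Content-Transfer-Encoding"])    # -> 7bit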
5,348
explosion/spaCy
spacy/util.py
compile_suffix_regex
def compile_suffix_regex(entries): """Compile a sequence of suffix rules into a regex object. entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES. RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search. """ expression = "|".join([piece + "$" for piece in entries if piece.strip()]) return re.compile(expression)
python
def compile_suffix_regex(entries): """Compile a sequence of suffix rules into a regex object. entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES. RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search. """ expression = "|".join([piece + "$" for piece in entries if piece.strip()]) return re.compile(expression)
['def', 'compile_suffix_regex', '(', 'entries', ')', ':', 'expression', '=', '"|"', '.', 'join', '(', '[', 'piece', '+', '"$"', 'for', 'piece', 'in', 'entries', 'if', 'piece', '.', 'strip', '(', ')', ']', ')', 'return', 're', '.', 'compile', '(', 'expression', ')']
Compile a sequence of suffix rules into a regex object. entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES. RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.
['Compile', 'a', 'sequence', 'of', 'suffix', 'rules', 'into', 'a', 'regex', 'object', '.']
train
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L346-L353
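A usage sketch for compile_suffix_regex above, assuming spaCy is installed. The suffix entries are a small made-up subset, not spaCy's real TOKENIZER_SUFFIXES.

from spacy.util import compile_suffix_regex

suffixes = (r"\.", r"!", r"\?", r"\)")     # illustrative entries
suffix_re = compile_suffix_regex(suffixes)
match = suffix_re.search("(word)")
print(match.group() if match else None)    # -> ')'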
5,349
EnigmaBridge/client.py
ebclient/process_data.py
ProcessData.call
def call(self, input_data=None, *args, **kwargs): """ Calls the request with input data using given configuration (retry, timeout, ...). :param input_data: :param args: :param kwargs: :return: """ self.build_request(input_data) self.caller = RequestCall(self.request) self.exception = None try: self.caller.call() self.response = self.caller.response self.decrypt_result() return self.decrypted except Exception as e: self.exception = e logger.info("Exception throw %s", e) pass
python
def call(self, input_data=None, *args, **kwargs): """ Calls the request with input data using given configuration (retry, timeout, ...). :param input_data: :param args: :param kwargs: :return: """ self.build_request(input_data) self.caller = RequestCall(self.request) self.exception = None try: self.caller.call() self.response = self.caller.response self.decrypt_result() return self.decrypted except Exception as e: self.exception = e logger.info("Exception throw %s", e) pass
['def', 'call', '(', 'self', ',', 'input_data', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'build_request', '(', 'input_data', ')', 'self', '.', 'caller', '=', 'RequestCall', '(', 'self', '.', 'request', ')', 'self', '.', 'exception', '=', 'None', 'try', ':', 'self', '.', 'caller', '.', 'call', '(', ')', 'self', '.', 'response', '=', 'self', '.', 'caller', '.', 'response', 'self', '.', 'decrypt_result', '(', ')', 'return', 'self', '.', 'decrypted', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'exception', '=', 'e', 'logger', '.', 'info', '(', '"Exception throw %s"', ',', 'e', ')', 'pass']
Calls the request with input data using given configuration (retry, timeout, ...). :param input_data: :param args: :param kwargs: :return:
['Calls', 'the', 'request', 'with', 'input', 'data', 'using', 'given', 'configuration', '(', 'retry', 'timeout', '...', ')', '.', ':', 'param', 'input_data', ':', ':', 'param', 'args', ':', ':', 'param', 'kwargs', ':', ':', 'return', ':']
train
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/process_data.py#L32-L53
5,350
lrq3000/pyFileFixity
pyFileFixity/replication_repair.py
synchronize_files
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False): ''' Main function to synchronize files contents by majority vote The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one. The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures. ''' # (Generator) Files Synchronization Algorithm: # Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable. # Until there's no file in any of the input folders to be processed: # - curfiles <- load first file for each folder by using stable_dir_walking on each input folder. # - curfiles_grouped <- group curfiles_ordered: # * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity) # * curfiles_grouped <- empty list # * curfiles_grouped[0] = add first element in curfiles_ordered # * last_group = 0 # * for every subsequent element nextelt in curfiles_ordered: # . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped) # . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group] # At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order. # - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file. # - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before. # At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder. 
# Init files walking generator for each inputpaths recgen = [recwalk(path, sorting=True) for path in inputpaths] curfiles = {} recgen_exhausted = {} recgen_exhausted_count = 0 nbpaths = len(inputpaths) retcode = 0 if not ptee: ptee = sys.stdout # Open report file and write header if report_file is not None: rfile = open(report_file, 'wb') r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"') r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"] r_length = len(r_header) r_writer.writerow(r_header) # Initialization: load the first batch of files, one for each folder for i in xrange(len(recgen)): recgen_exhausted[i] = False try: if curfiles.get(i, None) is None: curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] except StopIteration: recgen_exhausted[i] = True recgen_exhausted_count += 1 # Files lists alignment loop while recgen_exhausted_count < nbpaths: errcode = 0 errmsg = None # Init a new report's row if report_file: r_row = ["-"] * r_length # -- Group equivalent relative filepaths together #print curfiles # debug curfiles_grouped = sort_group(curfiles, True) # -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms) # Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now to_process = curfiles_grouped[0] #print to_process # debug # -- Byte-by-byte majority vote on the first group of files # Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group) relfilepath = path2unix(os.path.join(*to_process[0][1])) if report_file: r_row[0] = relfilepath if verbose: ptee.write("- Processing file %s." 
% relfilepath) # Generate output path outpathfull = os.path.join(outpath, relfilepath) create_dir_if_not_exist(os.path.dirname(outpathfull)) # Initialize the list of absolute filepaths fileslist = [] for elt in to_process: i = elt[0] fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1]))) if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file # If there's only one file, just copy it over if len(to_process) == 1: shutil.copyfile(fileslist[0], outpathfull) id = to_process[0][0] if report_file: r_row[id+1] = 'O' # Else, merge by majority vote else: # Before-merge check using rfigc database, if provided # If one of the files in the input folders is already correct, just copy it over correct_file = None if database: for id, filepath in enumerate(fileslist): if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0: correct_file = filepath correct_id = to_process[id][0] break # If one correct file was found, copy it over if correct_file: create_dir_if_not_exist(os.path.dirname(outpathfull)) shutil.copyfile(correct_file, outpathfull) if report_file: r_row[correct_id+1] = "O" r_row[-3] = "OK" # Else, we need to do the majority vote merge else: # Do the majority vote merge errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath) # After-merge/move check using rfigc database, if provided if database: if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1: errcode = 1 r_row[-3] = "KO" if not errmsg: errmsg = '' errmsg += " File could not be totally repaired according to rfigc database." else: if report_file: r_row[-3] = "OK" if errmsg: errmsg += " But merged file is correct according to rfigc database." # Display errors if any if errcode: if report_file: r_row[-2] = "KO" r_row[-1] = errmsg ptee.write(errmsg) retcode = 1 else: if report_file: r_row[-2] = "OK" # Save current report's row if report_file: r_writer.writerow(r_row) # -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment) for elt in to_process: # for files of the first group (the ones we processed) i = elt[0] # Walk their respective folders and load up the next file try: if not recgen_exhausted.get(i, False): curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] # If there's no file left in this folder, mark this input folder as exhausted and continue with the others except StopIteration: curfiles[i] = None recgen_exhausted[i] = True recgen_exhausted_count += 1 if tqdm_bar: tqdm_bar.update() if tqdm_bar: tqdm_bar.close() # Closing report file if report_file: # Write list of directories and legend rfile.write("\n=> Input directories:") for id, ipath in enumerate(inputpaths): rfile.write("\n\t- dir%i = %s" % ((id+1), ipath)) rfile.write("\n=> Output directory: %s" % outpath) rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n") # Close the report file handle rfile.close() return retcode
python
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False): ''' Main function to synchronize files contents by majority vote The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one. The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures. ''' # (Generator) Files Synchronization Algorithm: # Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable. # Until there's no file in any of the input folders to be processed: # - curfiles <- load first file for each folder by using stable_dir_walking on each input folder. # - curfiles_grouped <- group curfiles_ordered: # * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity) # * curfiles_grouped <- empty list # * curfiles_grouped[0] = add first element in curfiles_ordered # * last_group = 0 # * for every subsequent element nextelt in curfiles_ordered: # . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped) # . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group] # At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order. # - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file. # - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before. # At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder. 
# Init files walking generator for each inputpaths recgen = [recwalk(path, sorting=True) for path in inputpaths] curfiles = {} recgen_exhausted = {} recgen_exhausted_count = 0 nbpaths = len(inputpaths) retcode = 0 if not ptee: ptee = sys.stdout # Open report file and write header if report_file is not None: rfile = open(report_file, 'wb') r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"') r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"] r_length = len(r_header) r_writer.writerow(r_header) # Initialization: load the first batch of files, one for each folder for i in xrange(len(recgen)): recgen_exhausted[i] = False try: if curfiles.get(i, None) is None: curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] except StopIteration: recgen_exhausted[i] = True recgen_exhausted_count += 1 # Files lists alignment loop while recgen_exhausted_count < nbpaths: errcode = 0 errmsg = None # Init a new report's row if report_file: r_row = ["-"] * r_length # -- Group equivalent relative filepaths together #print curfiles # debug curfiles_grouped = sort_group(curfiles, True) # -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms) # Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now to_process = curfiles_grouped[0] #print to_process # debug # -- Byte-by-byte majority vote on the first group of files # Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group) relfilepath = path2unix(os.path.join(*to_process[0][1])) if report_file: r_row[0] = relfilepath if verbose: ptee.write("- Processing file %s." 
% relfilepath) # Generate output path outpathfull = os.path.join(outpath, relfilepath) create_dir_if_not_exist(os.path.dirname(outpathfull)) # Initialize the list of absolute filepaths fileslist = [] for elt in to_process: i = elt[0] fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1]))) if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file # If there's only one file, just copy it over if len(to_process) == 1: shutil.copyfile(fileslist[0], outpathfull) id = to_process[0][0] if report_file: r_row[id+1] = 'O' # Else, merge by majority vote else: # Before-merge check using rfigc database, if provided # If one of the files in the input folders is already correct, just copy it over correct_file = None if database: for id, filepath in enumerate(fileslist): if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0: correct_file = filepath correct_id = to_process[id][0] break # If one correct file was found, copy it over if correct_file: create_dir_if_not_exist(os.path.dirname(outpathfull)) shutil.copyfile(correct_file, outpathfull) if report_file: r_row[correct_id+1] = "O" r_row[-3] = "OK" # Else, we need to do the majority vote merge else: # Do the majority vote merge errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath) # After-merge/move check using rfigc database, if provided if database: if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1: errcode = 1 r_row[-3] = "KO" if not errmsg: errmsg = '' errmsg += " File could not be totally repaired according to rfigc database." else: if report_file: r_row[-3] = "OK" if errmsg: errmsg += " But merged file is correct according to rfigc database." # Display errors if any if errcode: if report_file: r_row[-2] = "KO" r_row[-1] = errmsg ptee.write(errmsg) retcode = 1 else: if report_file: r_row[-2] = "OK" # Save current report's row if report_file: r_writer.writerow(r_row) # -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment) for elt in to_process: # for files of the first group (the ones we processed) i = elt[0] # Walk their respective folders and load up the next file try: if not recgen_exhausted.get(i, False): curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1] # If there's no file left in this folder, mark this input folder as exhausted and continue with the others except StopIteration: curfiles[i] = None recgen_exhausted[i] = True recgen_exhausted_count += 1 if tqdm_bar: tqdm_bar.update() if tqdm_bar: tqdm_bar.close() # Closing report file if report_file: # Write list of directories and legend rfile.write("\n=> Input directories:") for id, ipath in enumerate(inputpaths): rfile.write("\n\t- dir%i = %s" % ((id+1), ipath)) rfile.write("\n=> Output directory: %s" % outpath) rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n") # Close the report file handle rfile.close() return retcode
['def', 'synchronize_files', '(', 'inputpaths', ',', 'outpath', ',', 'database', '=', 'None', ',', 'tqdm_bar', '=', 'None', ',', 'report_file', '=', 'None', ',', 'ptee', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', '# (Generator) Files Synchronization Algorithm:', '# Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable.', "# Until there's no file in any of the input folders to be processed:", '# - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.', '# - curfiles_grouped <- group curfiles_ordered:', '# * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)', '# * curfiles_grouped <- empty list', '# * curfiles_grouped[0] = add first element in curfiles_ordered', '# * last_group = 0', '# * for every subsequent element nextelt in curfiles_ordered:', '# . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)', '# . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]', '# At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.', '# - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.', '# - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. 
Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.', '# At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.', '# Init files walking generator for each inputpaths', 'recgen', '=', '[', 'recwalk', '(', 'path', ',', 'sorting', '=', 'True', ')', 'for', 'path', 'in', 'inputpaths', ']', 'curfiles', '=', '{', '}', 'recgen_exhausted', '=', '{', '}', 'recgen_exhausted_count', '=', '0', 'nbpaths', '=', 'len', '(', 'inputpaths', ')', 'retcode', '=', '0', 'if', 'not', 'ptee', ':', 'ptee', '=', 'sys', '.', 'stdout', '# Open report file and write header', 'if', 'report_file', 'is', 'not', 'None', ':', 'rfile', '=', 'open', '(', 'report_file', ',', "'wb'", ')', 'r_writer', '=', 'csv', '.', 'writer', '(', 'rfile', ',', 'delimiter', '=', "'|'", ',', 'lineterminator', '=', "'\\n'", ',', 'quotechar', '=', '\'"\'', ')', 'r_header', '=', '[', '"filepath"', ']', '+', '[', '"dir%i"', '%', '(', 'i', '+', '1', ')', 'for', 'i', 'in', 'xrange', '(', 'nbpaths', ')', ']', '+', '[', '"hash-correct"', ',', '"error_code"', ',', '"errors"', ']', 'r_length', '=', 'len', '(', 'r_header', ')', 'r_writer', '.', 'writerow', '(', 'r_header', ')', '# Initialization: load the first batch of files, one for each folder', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'recgen', ')', ')', ':', 'recgen_exhausted', '[', 'i', ']', '=', 'False', 'try', ':', 'if', 'curfiles', '.', 'get', '(', 'i', ',', 'None', ')', 'is', 'None', ':', 'curfiles', '[', 'i', ']', '=', 'relpath_posix', '(', 'recgen', '[', 'i', ']', '.', 'next', '(', ')', ',', 'inputpaths', '[', 'i', ']', ')', '[', '1', ']', 'except', 'StopIteration', ':', 'recgen_exhausted', '[', 'i', ']', '=', 'True', 'recgen_exhausted_count', '+=', '1', '# Files lists alignment loop', 'while', 'recgen_exhausted_count', '<', 'nbpaths', ':', 'errcode', '=', '0', 'errmsg', '=', 'None', "# Init a new report's row", 'if', 'report_file', ':', 'r_row', '=', '[', '"-"', ']', '*', 'r_length', '# -- Group equivalent relative filepaths together', '#print curfiles # debug', 'curfiles_grouped', '=', 'sort_group', '(', 'curfiles', ',', 'True', ')', '# -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)', '# Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now', 'to_process', '=', 'curfiles_grouped', '[', '0', ']', '#print to_process # debug', '# -- Byte-by-byte majority vote on the first group of files', "# Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group)", 'relfilepath', '=', 'path2unix', '(', 'os', '.', 'path', '.', 'join', '(', '*', 'to_process', '[', '0', ']', '[', '1', ']', ')', ')', 'if', 'report_file', ':', 'r_row', '[', '0', ']', '=', 'relfilepath', 'if', 'verbose', ':', 'ptee', '.', 'write', '(', '"- Processing file %s."', '%', 'relfilepath', ')', '# Generate output path', 'outpathfull', '=', 'os', '.', 'path', '.', 'join', '(', 'outpath', ',', 'relfilepath', ')', 'create_dir_if_not_exist', '(', 'os', '.', 'path', '.', 'dirname', '(', 'outpathfull', ')', ')', '# Initialize the list of absolute filepaths', 'fileslist', '=', '[', ']', 'for', 
'elt', 'in', 'to_process', ':', 'i', '=', 'elt', '[', '0', ']', 'fileslist', '.', 'append', '(', 'os', '.', 'path', '.', 'join', '(', 'inputpaths', '[', 'i', ']', ',', 'os', '.', 'path', '.', 'join', '(', '*', 'elt', '[', '1', ']', ')', ')', ')', 'if', 'report_file', ':', 'r_row', '[', 'i', '+', '1', ']', '=', "'X'", '# put an X in the report file below each folder that contains this file', "# If there's only one file, just copy it over", 'if', 'len', '(', 'to_process', ')', '==', '1', ':', 'shutil', '.', 'copyfile', '(', 'fileslist', '[', '0', ']', ',', 'outpathfull', ')', 'id', '=', 'to_process', '[', '0', ']', '[', '0', ']', 'if', 'report_file', ':', 'r_row', '[', 'id', '+', '1', ']', '=', "'O'", '# Else, merge by majority vote', 'else', ':', '# Before-merge check using rfigc database, if provided', '# If one of the files in the input folders is already correct, just copy it over', 'correct_file', '=', 'None', 'if', 'database', ':', 'for', 'id', ',', 'filepath', 'in', 'enumerate', '(', 'fileslist', ')', ':', 'if', 'rfigc', '.', 'main', '(', '"-i \\"%s\\" -d \\"%s\\" -m --silent"', '%', '(', 'filepath', ',', 'database', ')', ')', '==', '0', ':', 'correct_file', '=', 'filepath', 'correct_id', '=', 'to_process', '[', 'id', ']', '[', '0', ']', 'break', '# If one correct file was found, copy it over', 'if', 'correct_file', ':', 'create_dir_if_not_exist', '(', 'os', '.', 'path', '.', 'dirname', '(', 'outpathfull', ')', ')', 'shutil', '.', 'copyfile', '(', 'correct_file', ',', 'outpathfull', ')', 'if', 'report_file', ':', 'r_row', '[', 'correct_id', '+', '1', ']', '=', '"O"', 'r_row', '[', '-', '3', ']', '=', '"OK"', '# Else, we need to do the majority vote merge', 'else', ':', '# Do the majority vote merge', 'errcode', ',', 'errmsg', '=', 'majority_vote_byte_scan', '(', 'relfilepath', ',', 'fileslist', ',', 'outpath', ')', '# After-merge/move check using rfigc database, if provided', 'if', 'database', ':', 'if', 'rfigc', '.', 'main', '(', '"-i \\"%s\\" -d \\"%s\\" -m --silent"', '%', '(', 'outpathfull', ',', 'database', ')', ')', '==', '1', ':', 'errcode', '=', '1', 'r_row', '[', '-', '3', ']', '=', '"KO"', 'if', 'not', 'errmsg', ':', 'errmsg', '=', "''", 'errmsg', '+=', '" File could not be totally repaired according to rfigc database."', 'else', ':', 'if', 'report_file', ':', 'r_row', '[', '-', '3', ']', '=', '"OK"', 'if', 'errmsg', ':', 'errmsg', '+=', '" But merged file is correct according to rfigc database."', '# Display errors if any', 'if', 'errcode', ':', 'if', 'report_file', ':', 'r_row', '[', '-', '2', ']', '=', '"KO"', 'r_row', '[', '-', '1', ']', '=', 'errmsg', 'ptee', '.', 'write', '(', 'errmsg', ')', 'retcode', '=', '1', 'else', ':', 'if', 'report_file', ':', 'r_row', '[', '-', '2', ']', '=', '"OK"', "# Save current report's row", 'if', 'report_file', ':', 'r_writer', '.', 'writerow', '(', 'r_row', ')', '# -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment)', 'for', 'elt', 'in', 'to_process', ':', '# for files of the first group (the ones we processed)', 'i', '=', 'elt', '[', '0', ']', '# Walk their respective folders and load up the next file', 'try', ':', 'if', 'not', 'recgen_exhausted', '.', 'get', '(', 'i', ',', 'False', ')', ':', 'curfiles', '[', 'i', ']', '=', 'relpath_posix', '(', 'recgen', '[', 'i', ']', '.', 'next', '(', ')', ',', 'inputpaths', '[', 'i', ']', ')', '[', '1', ']', "# If there's no file left in this folder, mark this input folder as exhausted and continue with the others", 'except', 'StopIteration', ':', 
'curfiles', '[', 'i', ']', '=', 'None', 'recgen_exhausted', '[', 'i', ']', '=', 'True', 'recgen_exhausted_count', '+=', '1', 'if', 'tqdm_bar', ':', 'tqdm_bar', '.', 'update', '(', ')', 'if', 'tqdm_bar', ':', 'tqdm_bar', '.', 'close', '(', ')', '# Closing report file', 'if', 'report_file', ':', '# Write list of directories and legend', 'rfile', '.', 'write', '(', '"\\n=> Input directories:"', ')', 'for', 'id', ',', 'ipath', 'in', 'enumerate', '(', 'inputpaths', ')', ':', 'rfile', '.', 'write', '(', '"\\n\\t- dir%i = %s"', '%', '(', '(', 'id', '+', '1', ')', ',', 'ipath', ')', ')', 'rfile', '.', 'write', '(', '"\\n=> Output directory: %s"', '%', 'outpath', ')', 'rfile', '.', 'write', '(', '"\\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\\n"', ')', '# Close the report file handle', 'rfile', '.', 'close', '(', ')', 'return', 'retcode']
Main function to synchronize file contents by majority vote. The main job of this function is to walk through the input folders and align the files, so that we can compare every file across every folder, one by one. The whole trick here is the alignment: we never need to hold all the files in memory, yet we still compare all equivalent files together. To do that, we walk through the input directories in alphabetical order and always pick the relative filepath that sorts first; this keeps the files aligned between the different folders without memorizing the whole tree structures.
['Main', 'function', 'to', 'synchronize', 'files', 'contents', 'by', 'majority', 'vote', 'The', 'main', 'job', 'of', 'this', 'function', 'is', 'to', 'walk', 'through', 'the', 'input', 'folders', 'and', 'align', 'the', 'files', 'so', 'that', 'we', 'can', 'compare', 'every', 'files', 'across', 'every', 'folders', 'one', 'by', 'one', '.', 'The', 'whole', 'trick', 'here', 'is', 'to', 'align', 'files', 'so', 'that', 'we', 'don', 't', 'need', 'to', 'memorize', 'all', 'the', 'files', 'in', 'memory', 'and', 'we', 'compare', 'all', 'equivalent', 'files', 'together', ':', 'to', 'do', 'that', 'we', 'ensure', 'that', 'we', 'walk', 'through', 'the', 'input', 'directories', 'in', 'alphabetical', 'order', 'and', 'we', 'pick', 'the', 'relative', 'filepath', 'at', 'the', 'top', 'of', 'the', 'alphabetical', 'order', 'this', 'ensures', 'the', 'alignment', 'of', 'files', 'between', 'different', 'folders', 'without', 'memorizing', 'the', 'whole', 'trees', 'structures', '.']
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/replication_repair.py#L232-L394
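The alignment step described in the docstring above is the heart of the algorithm: at every iteration, only the group of loaded files whose relative path sorts first is merged, and only those folders advance to their next file. A minimal, self-contained sketch of that grouping step follows; first_group is a hypothetical stand-in for the module's sort_group helper, and the tuple-of-path-components representation is an assumption made for illustration.

# Hypothetical sketch of the grouping step; not pyFileFixity's sort_group().
def first_group(curfiles):
    """Return the (folder_index, relpath) pairs whose relative path sorts first
    among the files currently loaded from each input folder."""
    loaded = [(i, rel) for i, rel in curfiles.items() if rel is not None]
    if not loaded:
        return []
    smallest = min(rel for _, rel in loaded)
    return [(i, rel) for i, rel in loaded if rel == smallest]

# Three input folders, each with its next relative path loaded; folders 0 and 1
# are aligned on the same file, so they form the group to merge next.
curfiles = {0: ('a', 'x.txt'), 1: ('a', 'x.txt'), 2: ('a', 'y.txt')}
print(first_group(curfiles))  # [(0, ('a', 'x.txt')), (1, ('a', 'x.txt'))]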
5,351
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
RocketChat.channels_archive
def channels_archive(self, room_id, **kwargs): """Archives a channel.""" return self.__call_api_post('channels.archive', roomId=room_id, kwargs=kwargs)
python
def channels_archive(self, room_id, **kwargs): """Archives a channel.""" return self.__call_api_post('channels.archive', roomId=room_id, kwargs=kwargs)
['def', 'channels_archive', '(', 'self', ',', 'room_id', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '__call_api_post', '(', "'channels.archive'", ',', 'roomId', '=', 'room_id', ',', 'kwargs', '=', 'kwargs', ')']
Archives a channel.
['Archives', 'a', 'channel', '.']
train
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L316-L318
5,352
mitsei/dlkit
dlkit/json_/authorization/objects.py
Authorization.get_qualifier_id
def get_qualifier_id(self): """Gets the ``Qualifier Id`` for this authorization. return: (osid.id.Id) - the qualifier ``Id`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.Activity.get_objective_id if not bool(self._my_map['qualifierId']): raise errors.IllegalState('qualifier empty') return Id(self._my_map['qualifierId'])
python
def get_qualifier_id(self): """Gets the ``Qualifier Id`` for this authorization. return: (osid.id.Id) - the qualifier ``Id`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.Activity.get_objective_id if not bool(self._my_map['qualifierId']): raise errors.IllegalState('qualifier empty') return Id(self._my_map['qualifierId'])
['def', 'get_qualifier_id', '(', 'self', ')', ':', '# Implemented from template for osid.learning.Activity.get_objective_id', 'if', 'not', 'bool', '(', 'self', '.', '_my_map', '[', "'qualifierId'", ']', ')', ':', 'raise', 'errors', '.', 'IllegalState', '(', "'qualifier empty'", ')', 'return', 'Id', '(', 'self', '.', '_my_map', '[', "'qualifierId'", ']', ')']
Gets the ``Qualifier Id`` for this authorization. return: (osid.id.Id) - the qualifier ``Id`` *compliance: mandatory -- This method must be implemented.*
['Gets', 'the', 'Qualifier', 'Id', 'for', 'this', 'authorization', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/objects.py#L261-L271
5,353
senaite/senaite.core
bika/lims/content/worksheet.py
Worksheet.add_duplicate_analysis
def add_duplicate_analysis(self, src_analysis, destination_slot, ref_gid=None): """ Creates a duplicate of the src_analysis passed in. If the analysis passed in is not an IRoutineAnalysis, is retracted or has dependent services, returns None.If no reference analyses group id (ref_gid) is set, the value will be generated automatically. :param src_analysis: analysis to create a duplicate from :param destination_slot: slot where duplicate analysis must be stored :param ref_gid: the reference analysis group id to be set :return: the duplicate analysis or None """ if not src_analysis: return None if not IRoutineAnalysis.providedBy(src_analysis): logger.warning('Cannot create duplicate analysis from a non ' 'routine analysis: {}'.format(src_analysis.getId())) return None if api.get_review_status(src_analysis) == 'retracted': logger.warning('Cannot create duplicate analysis from a retracted' 'analysis: {}'.format(src_analysis.getId())) return None # TODO Workflow - Duplicate Analyses - Consider duplicates with deps # Removing this check from here and ensuring that duplicate.getSiblings # returns the analyses sorted by priority (duplicates from same # AR > routine analyses from same AR > duplicates from same WS > # routine analyses from same WS) should be almost enough calc = src_analysis.getCalculation() if calc and calc.getDependentServices(): logger.warning('Cannot create duplicate analysis from an' 'analysis with dependent services: {}' .format(src_analysis.getId())) return None # Create the duplicate duplicate = _createObjectByType("DuplicateAnalysis", self, tmpID()) duplicate.setAnalysis(src_analysis) # Set ReferenceAnalysesGroupID (same id for the analyses from # the same Reference Sample and same Worksheet) if not ref_gid: ref_gid = self.nextRefAnalysesGroupID(duplicate.getRequest()) duplicate.setReferenceAnalysesGroupID(ref_gid) # Add the duplicate into the worksheet self.addToLayout(duplicate, destination_slot) self.setAnalyses(self.getAnalyses() + [duplicate, ]) # Reindex duplicate.reindexObject(idxs=["getAnalyst", "getWorksheetUID", "getReferenceAnalysesGroupID"]) self.reindexObject(idxs=["getAnalysesUIDs"]) return duplicate
python
def add_duplicate_analysis(self, src_analysis, destination_slot, ref_gid=None): """ Creates a duplicate of the src_analysis passed in. If the analysis passed in is not an IRoutineAnalysis, is retracted or has dependent services, returns None.If no reference analyses group id (ref_gid) is set, the value will be generated automatically. :param src_analysis: analysis to create a duplicate from :param destination_slot: slot where duplicate analysis must be stored :param ref_gid: the reference analysis group id to be set :return: the duplicate analysis or None """ if not src_analysis: return None if not IRoutineAnalysis.providedBy(src_analysis): logger.warning('Cannot create duplicate analysis from a non ' 'routine analysis: {}'.format(src_analysis.getId())) return None if api.get_review_status(src_analysis) == 'retracted': logger.warning('Cannot create duplicate analysis from a retracted' 'analysis: {}'.format(src_analysis.getId())) return None # TODO Workflow - Duplicate Analyses - Consider duplicates with deps # Removing this check from here and ensuring that duplicate.getSiblings # returns the analyses sorted by priority (duplicates from same # AR > routine analyses from same AR > duplicates from same WS > # routine analyses from same WS) should be almost enough calc = src_analysis.getCalculation() if calc and calc.getDependentServices(): logger.warning('Cannot create duplicate analysis from an' 'analysis with dependent services: {}' .format(src_analysis.getId())) return None # Create the duplicate duplicate = _createObjectByType("DuplicateAnalysis", self, tmpID()) duplicate.setAnalysis(src_analysis) # Set ReferenceAnalysesGroupID (same id for the analyses from # the same Reference Sample and same Worksheet) if not ref_gid: ref_gid = self.nextRefAnalysesGroupID(duplicate.getRequest()) duplicate.setReferenceAnalysesGroupID(ref_gid) # Add the duplicate into the worksheet self.addToLayout(duplicate, destination_slot) self.setAnalyses(self.getAnalyses() + [duplicate, ]) # Reindex duplicate.reindexObject(idxs=["getAnalyst", "getWorksheetUID", "getReferenceAnalysesGroupID"]) self.reindexObject(idxs=["getAnalysesUIDs"]) return duplicate
['def', 'add_duplicate_analysis', '(', 'self', ',', 'src_analysis', ',', 'destination_slot', ',', 'ref_gid', '=', 'None', ')', ':', 'if', 'not', 'src_analysis', ':', 'return', 'None', 'if', 'not', 'IRoutineAnalysis', '.', 'providedBy', '(', 'src_analysis', ')', ':', 'logger', '.', 'warning', '(', "'Cannot create duplicate analysis from a non '", "'routine analysis: {}'", '.', 'format', '(', 'src_analysis', '.', 'getId', '(', ')', ')', ')', 'return', 'None', 'if', 'api', '.', 'get_review_status', '(', 'src_analysis', ')', '==', "'retracted'", ':', 'logger', '.', 'warning', '(', "'Cannot create duplicate analysis from a retracted'", "'analysis: {}'", '.', 'format', '(', 'src_analysis', '.', 'getId', '(', ')', ')', ')', 'return', 'None', '# TODO Workflow - Duplicate Analyses - Consider duplicates with deps', '# Removing this check from here and ensuring that duplicate.getSiblings', '# returns the analyses sorted by priority (duplicates from same', '# AR > routine analyses from same AR > duplicates from same WS >', '# routine analyses from same WS) should be almost enough', 'calc', '=', 'src_analysis', '.', 'getCalculation', '(', ')', 'if', 'calc', 'and', 'calc', '.', 'getDependentServices', '(', ')', ':', 'logger', '.', 'warning', '(', "'Cannot create duplicate analysis from an'", "'analysis with dependent services: {}'", '.', 'format', '(', 'src_analysis', '.', 'getId', '(', ')', ')', ')', 'return', 'None', '# Create the duplicate', 'duplicate', '=', '_createObjectByType', '(', '"DuplicateAnalysis"', ',', 'self', ',', 'tmpID', '(', ')', ')', 'duplicate', '.', 'setAnalysis', '(', 'src_analysis', ')', '# Set ReferenceAnalysesGroupID (same id for the analyses from', '# the same Reference Sample and same Worksheet)', 'if', 'not', 'ref_gid', ':', 'ref_gid', '=', 'self', '.', 'nextRefAnalysesGroupID', '(', 'duplicate', '.', 'getRequest', '(', ')', ')', 'duplicate', '.', 'setReferenceAnalysesGroupID', '(', 'ref_gid', ')', '# Add the duplicate into the worksheet', 'self', '.', 'addToLayout', '(', 'duplicate', ',', 'destination_slot', ')', 'self', '.', 'setAnalyses', '(', 'self', '.', 'getAnalyses', '(', ')', '+', '[', 'duplicate', ',', ']', ')', '# Reindex', 'duplicate', '.', 'reindexObject', '(', 'idxs', '=', '[', '"getAnalyst"', ',', '"getWorksheetUID"', ',', '"getReferenceAnalysesGroupID"', ']', ')', 'self', '.', 'reindexObject', '(', 'idxs', '=', '[', '"getAnalysesUIDs"', ']', ')', 'return', 'duplicate']
Creates a duplicate of the src_analysis passed in. If the analysis passed in is not an IRoutineAnalysis, is retracted or has dependent services, returns None. If no reference analyses group id (ref_gid) is set, the value will be generated automatically. :param src_analysis: analysis to create a duplicate from :param destination_slot: slot where duplicate analysis must be stored :param ref_gid: the reference analysis group id to be set :return: the duplicate analysis or None
['Creates', 'a', 'duplicate', 'of', 'the', 'src_analysis', 'passed', 'in', '.', 'If', 'the', 'analysis', 'passed', 'in', 'is', 'not', 'an', 'IRoutineAnalysis', 'is', 'retracted', 'or', 'has', 'dependent', 'services', 'returns', 'None', '.', 'If', 'no', 'reference', 'analyses', 'group', 'id', '(', 'ref_gid', ')', 'is', 'set', 'the', 'value', 'will', 'be', 'generated', 'automatically', '.', ':', 'param', 'src_analysis', ':', 'analysis', 'to', 'create', 'a', 'duplicate', 'from', ':', 'param', 'destination_slot', ':', 'slot', 'where', 'duplicate', 'analysis', 'must', 'be', 'stored', ':', 'param', 'ref_gid', ':', 'the', 'reference', 'analysis', 'group', 'id', 'to', 'be', 'set', ':', 'return', ':', 'the', 'duplicate', 'analysis', 'or', 'None']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/worksheet.py#L475-L530
5,354
treycucco/bidon
bidon/util/transform.py
get_composition
def get_composition(source, *fxns): """Compose several extractors together, on a source.""" val = source for fxn in fxns: val = fxn(val) return val
python
def get_composition(source, *fxns): """Compose several extractors together, on a source.""" val = source for fxn in fxns: val = fxn(val) return val
['def', 'get_composition', '(', 'source', ',', '*', 'fxns', ')', ':', 'val', '=', 'source', 'for', 'fxn', 'in', 'fxns', ':', 'val', '=', 'fxn', '(', 'val', ')', 'return', 'val']
Compose several extractors together on a source.
['Compose', 'several', 'extractors', 'together', 'on', 'a', 'source', '.']
train
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/transform.py#L65-L70
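get_composition simply threads a source value through each extractor from left to right, so get_composition(x, f, g, h) is h(g(f(x))). A small usage sketch follows; the function body is repeated from the record so the snippet stands alone, and the extractors themselves are made up for illustration.

def get_composition(source, *fxns):
    """Compose several extractors together on a source."""
    val = source
    for fxn in fxns:
        val = fxn(val)
    return val

data = {"user": {"name": "  Ada  "}}
get_user = lambda d: d["user"]
get_name = lambda d: d["name"]

# Equivalent to str.strip(get_name(get_user(data)))
print(get_composition(data, get_user, get_name, str.strip))  # 'Ada'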
5,355
pgjones/quart
quart/app.py
Quart.after_websocket
def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable: """Add an after websocket function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_websocket def func(response): return response Arguments: func: The after websocket function itself. name: Optional blueprint key name. """ handler = ensure_coroutine(func) self.after_websocket_funcs[name].append(handler) return func
python
def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable: """Add an after websocket function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_websocket def func(response): return response Arguments: func: The after websocket function itself. name: Optional blueprint key name. """ handler = ensure_coroutine(func) self.after_websocket_funcs[name].append(handler) return func
['def', 'after_websocket', '(', 'self', ',', 'func', ':', 'Callable', ',', 'name', ':', 'AppOrBlueprintKey', '=', 'None', ')', '->', 'Callable', ':', 'handler', '=', 'ensure_coroutine', '(', 'func', ')', 'self', '.', 'after_websocket_funcs', '[', 'name', ']', '.', 'append', '(', 'handler', ')', 'return', 'func']
Add an after websocket function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_websocket def func(response): return response Arguments: func: The after websocket function itself. name: Optional blueprint key name.
['Add', 'an', 'after', 'websocket', 'function', '.']
train
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1104-L1121
5,356
HazyResearch/fonduer
src/fonduer/features/feature_libs/table_features.py
tablelib_binary_features
def tablelib_binary_features(span1, span2): """ Table-/structure-related features for a pair of spans """ binary_features = settings["featurization"]["table"]["binary_features"] if span1.sentence.is_tabular() and span2.sentence.is_tabular(): if span1.sentence.table == span2.sentence.table: yield "SAME_TABLE", DEF_VALUE if span1.sentence.cell is not None and span2.sentence.cell is not None: row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"SAME_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"SAME_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"SAME_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE if span1.sentence.cell == span2.sentence.cell: yield "SAME_CELL", DEF_VALUE yield ( f"WORD_DIFF_[" f"{span1.get_word_start_index() - span2.get_word_start_index()}" f"]" ), DEF_VALUE yield ( f"CHAR_DIFF_[{span1.char_start - span2.char_start}]" ), DEF_VALUE if span1.sentence == span2.sentence: yield "SAME_SENTENCE", DEF_VALUE else: if span1.sentence.cell is not None and span2.sentence.cell is not None: yield "DIFF_TABLE", DEF_VALUE row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"DIFF_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"DIFF_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"DIFF_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE
python
def tablelib_binary_features(span1, span2): """ Table-/structure-related features for a pair of spans """ binary_features = settings["featurization"]["table"]["binary_features"] if span1.sentence.is_tabular() and span2.sentence.is_tabular(): if span1.sentence.table == span2.sentence.table: yield "SAME_TABLE", DEF_VALUE if span1.sentence.cell is not None and span2.sentence.cell is not None: row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"SAME_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"SAME_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"SAME_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE if span1.sentence.cell == span2.sentence.cell: yield "SAME_CELL", DEF_VALUE yield ( f"WORD_DIFF_[" f"{span1.get_word_start_index() - span2.get_word_start_index()}" f"]" ), DEF_VALUE yield ( f"CHAR_DIFF_[{span1.char_start - span2.char_start}]" ), DEF_VALUE if span1.sentence == span2.sentence: yield "SAME_SENTENCE", DEF_VALUE else: if span1.sentence.cell is not None and span2.sentence.cell is not None: yield "DIFF_TABLE", DEF_VALUE row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"DIFF_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"DIFF_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"DIFF_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE
['def', 'tablelib_binary_features', '(', 'span1', ',', 'span2', ')', ':', 'binary_features', '=', 'settings', '[', '"featurization"', ']', '[', '"table"', ']', '[', '"binary_features"', ']', 'if', 'span1', '.', 'sentence', '.', 'is_tabular', '(', ')', 'and', 'span2', '.', 'sentence', '.', 'is_tabular', '(', ')', ':', 'if', 'span1', '.', 'sentence', '.', 'table', '==', 'span2', '.', 'sentence', '.', 'table', ':', 'yield', '"SAME_TABLE"', ',', 'DEF_VALUE', 'if', 'span1', '.', 'sentence', '.', 'cell', 'is', 'not', 'None', 'and', 'span2', '.', 'sentence', '.', 'cell', 'is', 'not', 'None', ':', 'row_diff', '=', 'min_row_diff', '(', 'span1', '.', 'sentence', ',', 'span2', '.', 'sentence', ',', 'absolute', '=', 'binary_features', '[', '"min_row_diff"', ']', '[', '"absolute"', ']', ',', ')', 'col_diff', '=', 'min_col_diff', '(', 'span1', '.', 'sentence', ',', 'span2', '.', 'sentence', ',', 'absolute', '=', 'binary_features', '[', '"min_col_diff"', ']', '[', '"absolute"', ']', ',', ')', 'yield', 'f"SAME_TABLE_ROW_DIFF_[{row_diff}]"', ',', 'DEF_VALUE', 'yield', 'f"SAME_TABLE_COL_DIFF_[{col_diff}]"', ',', 'DEF_VALUE', 'yield', '(', 'f"SAME_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]"', ')', ',', 'DEF_VALUE', 'if', 'span1', '.', 'sentence', '.', 'cell', '==', 'span2', '.', 'sentence', '.', 'cell', ':', 'yield', '"SAME_CELL"', ',', 'DEF_VALUE', 'yield', '(', 'f"WORD_DIFF_["', 'f"{span1.get_word_start_index() - span2.get_word_start_index()}"', 'f"]"', ')', ',', 'DEF_VALUE', 'yield', '(', 'f"CHAR_DIFF_[{span1.char_start - span2.char_start}]"', ')', ',', 'DEF_VALUE', 'if', 'span1', '.', 'sentence', '==', 'span2', '.', 'sentence', ':', 'yield', '"SAME_SENTENCE"', ',', 'DEF_VALUE', 'else', ':', 'if', 'span1', '.', 'sentence', '.', 'cell', 'is', 'not', 'None', 'and', 'span2', '.', 'sentence', '.', 'cell', 'is', 'not', 'None', ':', 'yield', '"DIFF_TABLE"', ',', 'DEF_VALUE', 'row_diff', '=', 'min_row_diff', '(', 'span1', '.', 'sentence', ',', 'span2', '.', 'sentence', ',', 'absolute', '=', 'binary_features', '[', '"min_row_diff"', ']', '[', '"absolute"', ']', ',', ')', 'col_diff', '=', 'min_col_diff', '(', 'span1', '.', 'sentence', ',', 'span2', '.', 'sentence', ',', 'absolute', '=', 'binary_features', '[', '"min_col_diff"', ']', '[', '"absolute"', ']', ',', ')', 'yield', 'f"DIFF_TABLE_ROW_DIFF_[{row_diff}]"', ',', 'DEF_VALUE', 'yield', 'f"DIFF_TABLE_COL_DIFF_[{col_diff}]"', ',', 'DEF_VALUE', 'yield', '(', 'f"DIFF_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]"', ')', ',', 'DEF_VALUE']
Table-/structure-related features for a pair of spans
['Table', '-', '/', 'structure', '-', 'related', 'features', 'for', 'a', 'pair', 'of', 'spans']
train
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/feature_libs/table_features.py#L128-L181
5,357
blockstack/blockstack-files
blockstack_file/blockstack_file.py
file_sign
def file_sign( blockchain_id, hostname, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ): """ Sign a file with the current blockchain ID's host's public key. @config_path should be for the *client*, not blockstack-file Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success, and write ciphertext to output_path Return {'error': ...} on error """ config_dir = os.path.dirname(config_path) # find our encryption key key_info = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path, wallet_keys=wallet_keys ) if 'error' in key_info: return {'error': 'Failed to lookup encryption key'} # sign res = blockstack_gpg.gpg_sign( input_path, key_info, config_dir=config_dir ) if 'error' in res: log.error("Failed to encrypt: %s" % res['error']) return {'error': 'Failed to encrypt'} return {'status': True, 'sender_key_id': key_info['key_id'], 'sig': res['sig']}
python
def file_sign( blockchain_id, hostname, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ): """ Sign a file with the current blockchain ID's host's public key. @config_path should be for the *client*, not blockstack-file Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success, and write ciphertext to output_path Return {'error': ...} on error """ config_dir = os.path.dirname(config_path) # find our encryption key key_info = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path, wallet_keys=wallet_keys ) if 'error' in key_info: return {'error': 'Failed to lookup encryption key'} # sign res = blockstack_gpg.gpg_sign( input_path, key_info, config_dir=config_dir ) if 'error' in res: log.error("Failed to encrypt: %s" % res['error']) return {'error': 'Failed to encrypt'} return {'status': True, 'sender_key_id': key_info['key_id'], 'sig': res['sig']}
['def', 'file_sign', '(', 'blockchain_id', ',', 'hostname', ',', 'input_path', ',', 'passphrase', '=', 'None', ',', 'config_path', '=', 'CONFIG_PATH', ',', 'wallet_keys', '=', 'None', ')', ':', 'config_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'config_path', ')', '# find our encryption key', 'key_info', '=', 'file_key_lookup', '(', 'blockchain_id', ',', '0', ',', 'hostname', ',', 'config_path', '=', 'config_path', ',', 'wallet_keys', '=', 'wallet_keys', ')', 'if', "'error'", 'in', 'key_info', ':', 'return', '{', "'error'", ':', "'Failed to lookup encryption key'", '}', '# sign', 'res', '=', 'blockstack_gpg', '.', 'gpg_sign', '(', 'input_path', ',', 'key_info', ',', 'config_dir', '=', 'config_dir', ')', 'if', "'error'", 'in', 'res', ':', 'log', '.', 'error', '(', '"Failed to encrypt: %s"', '%', 'res', '[', "'error'", ']', ')', 'return', '{', "'error'", ':', "'Failed to encrypt'", '}', 'return', '{', "'status'", ':', 'True', ',', "'sender_key_id'", ':', 'key_info', '[', "'key_id'", ']', ',', "'sig'", ':', 'res', '[', "'sig'", ']', '}']
Sign a file with the current blockchain ID's host's public key. @config_path should be for the *client*, not blockstack-file Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success, and write ciphertext to output_path Return {'error': ...} on error
['Sign', 'a', 'file', 'with', 'the', 'current', 'blockchain', 'ID', 's', 'host', 's', 'public', 'key', '.']
train
https://github.com/blockstack/blockstack-files/blob/8d88cc48bdf8ed57f17d4bba860e972bde321921/blockstack_file/blockstack_file.py#L419-L439
5,358
twitterdev/twitter-python-ads-sdk
twitter_ads/cursor.py
Cursor.next
def next(self): """Returns the next item in the cursor.""" if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._current_index = 0 raise StopIteration
python
def next(self): """Returns the next item in the cursor.""" if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._current_index = 0 raise StopIteration
['def', 'next', '(', 'self', ')', ':', 'if', 'self', '.', '_current_index', '<', 'len', '(', 'self', '.', '_collection', ')', ':', 'value', '=', 'self', '.', '_collection', '[', 'self', '.', '_current_index', ']', 'self', '.', '_current_index', '+=', '1', 'return', 'value', 'elif', 'self', '.', '_next_cursor', ':', 'self', '.', '__fetch_next', '(', ')', 'return', 'self', '.', 'next', '(', ')', 'else', ':', 'self', '.', '_current_index', '=', '0', 'raise', 'StopIteration']
Returns the next item in the cursor.
['Returns', 'the', 'next', 'item', 'in', 'the', 'cursor', '.']
train
https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/cursor.py#L62-L73
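Cursor.next above follows a standard pagination pattern: serve items from the current page, fetch the next page when the current one is exhausted, and raise StopIteration when there is no further cursor. The standalone sketch below mirrors only that control flow with an in-memory list of pages; it is not the twitter-ads Cursor, whose page fetching goes through the Ads API.

class PagedCursor:
    """Toy cursor over pre-fetched pages, mirroring the next() logic above."""
    def __init__(self, pages):
        self._pages = list(pages)
        self._collection = self._pages.pop(0) if self._pages else []
        self._current_index = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self._current_index < len(self._collection):
            value = self._collection[self._current_index]
            self._current_index += 1
            return value
        elif self._pages:                          # stands in for _next_cursor
            self._collection = self._pages.pop(0)  # stands in for __fetch_next()
            self._current_index = 0
            return self.__next__()
        else:
            self._current_index = 0
            raise StopIteration

print(list(PagedCursor([[1, 2], [3], [4, 5]])))  # [1, 2, 3, 4, 5]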
5,359
Chilipp/psyplot
psyplot/data.py
InteractiveList.start_update
def start_update(self, draw=None, queues=None): """ Conduct the formerly registered updates This method conducts the updates that have been registered via the :meth:`update` method. You can call this method if the :attr:`auto_update` attribute of this instance is True and the `auto_update` parameter in the :meth:`update` method has been set to False Parameters ---------- %(InteractiveBase.start_update.parameters)s Returns ------- %(InteractiveBase.start_update.returns)s See Also -------- :attr:`no_auto_update`, update """ if queues is not None: queues[0].get() try: for arr in self: arr.psy.start_update(draw=False) self.onupdate.emit() except Exception: self._finish_all(queues) raise if queues is not None: queues[0].task_done() return InteractiveBase.start_update(self, draw=draw, queues=queues)
python
def start_update(self, draw=None, queues=None): """ Conduct the formerly registered updates This method conducts the updates that have been registered via the :meth:`update` method. You can call this method if the :attr:`auto_update` attribute of this instance is True and the `auto_update` parameter in the :meth:`update` method has been set to False Parameters ---------- %(InteractiveBase.start_update.parameters)s Returns ------- %(InteractiveBase.start_update.returns)s See Also -------- :attr:`no_auto_update`, update """ if queues is not None: queues[0].get() try: for arr in self: arr.psy.start_update(draw=False) self.onupdate.emit() except Exception: self._finish_all(queues) raise if queues is not None: queues[0].task_done() return InteractiveBase.start_update(self, draw=draw, queues=queues)
['def', 'start_update', '(', 'self', ',', 'draw', '=', 'None', ',', 'queues', '=', 'None', ')', ':', 'if', 'queues', 'is', 'not', 'None', ':', 'queues', '[', '0', ']', '.', 'get', '(', ')', 'try', ':', 'for', 'arr', 'in', 'self', ':', 'arr', '.', 'psy', '.', 'start_update', '(', 'draw', '=', 'False', ')', 'self', '.', 'onupdate', '.', 'emit', '(', ')', 'except', 'Exception', ':', 'self', '.', '_finish_all', '(', 'queues', ')', 'raise', 'if', 'queues', 'is', 'not', 'None', ':', 'queues', '[', '0', ']', '.', 'task_done', '(', ')', 'return', 'InteractiveBase', '.', 'start_update', '(', 'self', ',', 'draw', '=', 'draw', ',', 'queues', '=', 'queues', ')']
Conduct the formerly registered updates This method conducts the updates that have been registered via the :meth:`update` method. You can call this method if the :attr:`auto_update` attribute of this instance is True and the `auto_update` parameter in the :meth:`update` method has been set to False Parameters ---------- %(InteractiveBase.start_update.parameters)s Returns ------- %(InteractiveBase.start_update.returns)s See Also -------- :attr:`no_auto_update`, update
['Conduct', 'the', 'formerly', 'registered', 'updates']
train
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L4616-L4649
5,360
summa-tx/riemann
riemann/encoding/bech32.py
segwit_encode
def segwit_encode(hrp, witver, witprog): """Encode a segwit address.""" ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5)) if segwit_decode(hrp, ret) == (None, None): return None return ret
python
def segwit_encode(hrp, witver, witprog): """Encode a segwit address.""" ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5)) if segwit_decode(hrp, ret) == (None, None): return None return ret
['def', 'segwit_encode', '(', 'hrp', ',', 'witver', ',', 'witprog', ')', ':', 'ret', '=', 'bech32_encode', '(', 'hrp', ',', '[', 'witver', ']', '+', 'convertbits', '(', 'witprog', ',', '8', ',', '5', ')', ')', 'if', 'segwit_decode', '(', 'hrp', ',', 'ret', ')', '==', '(', 'None', ',', 'None', ')', ':', 'return', 'None', 'return', 'ret']
Encode a segwit address.
['Encode', 'a', 'segwit', 'address', '.']
train
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/bech32.py#L69-L74
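Assuming the riemann package is importable, calling segwit_encode looks roughly like the sketch below. The 20-byte witness program and the expected address are the familiar BIP173 P2WPKH test vector, not values taken from this repository, so treat them as an illustration rather than a test of this code.

# Sketch only: assumes riemann is installed and importable.
from riemann.encoding import bech32

witness_program = bytes.fromhex("751e76e8199196d454941c45d1b3a323f1433bd6")
address = bech32.segwit_encode("bc", 0, witness_program)
print(address)  # per BIP173 this should be bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4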
5,361
curious-containers/cc-core
cc_core/commons/input_references.py
_partition_all_internal
def _partition_all_internal(s, sep): """ Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep """ parts = list(s.partition(sep)) # if sep found if parts[1] == sep: new_parts = partition_all(parts[2], sep) parts.pop() parts.extend(new_parts) return [p for p in parts if p] else: if parts[0]: return [parts[0]] else: return []
python
def _partition_all_internal(s, sep): """ Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep """ parts = list(s.partition(sep)) # if sep found if parts[1] == sep: new_parts = partition_all(parts[2], sep) parts.pop() parts.extend(new_parts) return [p for p in parts if p] else: if parts[0]: return [parts[0]] else: return []
['def', '_partition_all_internal', '(', 's', ',', 'sep', ')', ':', 'parts', '=', 'list', '(', 's', '.', 'partition', '(', 'sep', ')', ')', '# if sep found', 'if', 'parts', '[', '1', ']', '==', 'sep', ':', 'new_parts', '=', 'partition_all', '(', 'parts', '[', '2', ']', ',', 'sep', ')', 'parts', '.', 'pop', '(', ')', 'parts', '.', 'extend', '(', 'new_parts', ')', 'return', '[', 'p', 'for', 'p', 'in', 'parts', 'if', 'p', ']', 'else', ':', 'if', 'parts', '[', '0', ']', ':', 'return', '[', 'parts', '[', '0', ']', ']', 'else', ':', 'return', '[', ']']
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep
['Uses', 'str', '.', 'partition', '()', 'to', 'split', 'every', 'occurrence', 'of', 'sep', 'in', 's', '.', 'The', 'returned', 'list', 'does', 'not', 'contain', 'empty', 'strings', '.']
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L35-L55
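Seen from the outside, the recursion splits away every occurrence of the separator and drops the empty fragments that consecutive or trailing separators would otherwise produce. The sketch below reproduces that observable behaviour in a single self-contained function (the module itself routes the recursion through a partition_all wrapper).

# Self-contained sketch of the behaviour; mirrors the recursion above without
# the module's partition_all wrapper.
def split_all(s, sep):
    head, found, tail = s.partition(sep)
    if found == sep:
        return ([head] if head else []) + split_all(tail, sep)
    return [head] if head else []

print(split_all("a.b..c.", "."))  # ['a', 'b', 'c']
print(split_all("no-sep", "."))   # ['no-sep']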
5,362
rsmuc/health_monitoring_plugins
health_monitoring_plugins/snmpSessionBaseClass.py
state_summary
def state_summary(value, name, state_list, helper, ok_value = 'ok', info = None): """ Always add the status to the long output, and if the status is not ok (or ok_value), we show it in the summary and set the status to critical """ # translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list state_value = state_list[int(value)] summary_output = '' long_output = '' if not info: info = '' if state_value != ok_value: summary_output += ('%s status: %s %s ' % (name, state_value, info)) helper.status(pynag.Plugins.critical) long_output += ('%s status: %s %s\n' % (name, state_value, info)) return (summary_output, long_output)
python
def state_summary(value, name, state_list, helper, ok_value = 'ok', info = None): """ Always add the status to the long output, and if the status is not ok (or ok_value), we show it in the summary and set the status to critical """ # translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list state_value = state_list[int(value)] summary_output = '' long_output = '' if not info: info = '' if state_value != ok_value: summary_output += ('%s status: %s %s ' % (name, state_value, info)) helper.status(pynag.Plugins.critical) long_output += ('%s status: %s %s\n' % (name, state_value, info)) return (summary_output, long_output)
['def', 'state_summary', '(', 'value', ',', 'name', ',', 'state_list', ',', 'helper', ',', 'ok_value', '=', "'ok'", ',', 'info', '=', 'None', ')', ':', '# translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list', 'state_value', '=', 'state_list', '[', 'int', '(', 'value', ')', ']', 'summary_output', '=', "''", 'long_output', '=', "''", 'if', 'not', 'info', ':', 'info', '=', "''", 'if', 'state_value', '!=', 'ok_value', ':', 'summary_output', '+=', '(', "'%s status: %s %s '", '%', '(', 'name', ',', 'state_value', ',', 'info', ')', ')', 'helper', '.', 'status', '(', 'pynag', '.', 'Plugins', '.', 'critical', ')', 'long_output', '+=', '(', "'%s status: %s %s\\n'", '%', '(', 'name', ',', 'state_value', ',', 'info', ')', ')', 'return', '(', 'summary_output', ',', 'long_output', ')']
Always add the status to the long output, and if the status is not ok (or ok_value), we show it in the summary and set the status to critical
['Always', 'add', 'the', 'status', 'to', 'the', 'long', 'output', 'and', 'if', 'the', 'status', 'is', 'not', 'ok', '(', 'or', 'ok_value', ')', 'we', 'show', 'it', 'in', 'the', 'summary', 'and', 'set', 'the', 'status', 'to', 'critical']
train
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/snmpSessionBaseClass.py#L152-L167
5,363
GNS3/gns3-server
gns3server/compute/qemu/qemu_vm.py
QemuVM._graphic
def _graphic(self): """ Adds the correct graphic options depending of the OS """ if sys.platform.startswith("win"): return [] if len(os.environ.get("DISPLAY", "")) > 0: return [] if "-nographic" not in self._options: return ["-nographic"] return []
python
def _graphic(self): """ Adds the correct graphic options depending of the OS """ if sys.platform.startswith("win"): return [] if len(os.environ.get("DISPLAY", "")) > 0: return [] if "-nographic" not in self._options: return ["-nographic"] return []
['def', '_graphic', '(', 'self', ')', ':', 'if', 'sys', '.', 'platform', '.', 'startswith', '(', '"win"', ')', ':', 'return', '[', ']', 'if', 'len', '(', 'os', '.', 'environ', '.', 'get', '(', '"DISPLAY"', ',', '""', ')', ')', '>', '0', ':', 'return', '[', ']', 'if', '"-nographic"', 'not', 'in', 'self', '.', '_options', ':', 'return', '[', '"-nographic"', ']', 'return', '[', ']']
Adds the correct graphic options depending on the OS
['Adds', 'the', 'correct', 'graphic', 'options', 'depending', 'of', 'the', 'OS']
train
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/qemu/qemu_vm.py#L1603-L1614
5,364
bcbio/bcbio-nextgen
bcbio/broad/picardrun.py
picard_sort
def picard_sort(picard, align_bam, sort_order="coordinate", out_file=None, compression_level=None, pipe=False): """Sort a BAM file by coordinates. """ base, ext = os.path.splitext(align_bam) if out_file is None: out_file = "%s-sort%s" % (base, ext) if not file_exists(out_file): with tx_tmpdir(picard._config) as tmp_dir: with file_transaction(picard._config, out_file) as tx_out_file: opts = [("INPUT", align_bam), ("OUTPUT", out_file if pipe else tx_out_file), ("TMP_DIR", tmp_dir), ("SORT_ORDER", sort_order)] if compression_level: opts.append(("COMPRESSION_LEVEL", compression_level)) picard.run("SortSam", opts, pipe=pipe) return out_file
python
def picard_sort(picard, align_bam, sort_order="coordinate", out_file=None, compression_level=None, pipe=False): """Sort a BAM file by coordinates. """ base, ext = os.path.splitext(align_bam) if out_file is None: out_file = "%s-sort%s" % (base, ext) if not file_exists(out_file): with tx_tmpdir(picard._config) as tmp_dir: with file_transaction(picard._config, out_file) as tx_out_file: opts = [("INPUT", align_bam), ("OUTPUT", out_file if pipe else tx_out_file), ("TMP_DIR", tmp_dir), ("SORT_ORDER", sort_order)] if compression_level: opts.append(("COMPRESSION_LEVEL", compression_level)) picard.run("SortSam", opts, pipe=pipe) return out_file
['def', 'picard_sort', '(', 'picard', ',', 'align_bam', ',', 'sort_order', '=', '"coordinate"', ',', 'out_file', '=', 'None', ',', 'compression_level', '=', 'None', ',', 'pipe', '=', 'False', ')', ':', 'base', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'align_bam', ')', 'if', 'out_file', 'is', 'None', ':', 'out_file', '=', '"%s-sort%s"', '%', '(', 'base', ',', 'ext', ')', 'if', 'not', 'file_exists', '(', 'out_file', ')', ':', 'with', 'tx_tmpdir', '(', 'picard', '.', '_config', ')', 'as', 'tmp_dir', ':', 'with', 'file_transaction', '(', 'picard', '.', '_config', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'opts', '=', '[', '(', '"INPUT"', ',', 'align_bam', ')', ',', '(', '"OUTPUT"', ',', 'out_file', 'if', 'pipe', 'else', 'tx_out_file', ')', ',', '(', '"TMP_DIR"', ',', 'tmp_dir', ')', ',', '(', '"SORT_ORDER"', ',', 'sort_order', ')', ']', 'if', 'compression_level', ':', 'opts', '.', 'append', '(', '(', '"COMPRESSION_LEVEL"', ',', 'compression_level', ')', ')', 'picard', '.', 'run', '(', '"SortSam"', ',', 'opts', ',', 'pipe', '=', 'pipe', ')', 'return', 'out_file']
Sort a BAM file by coordinates.
['Sort', 'a', 'BAM', 'file', 'by', 'coordinates', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L46-L63
5,365
MillionIntegrals/vel
vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py
BufferedOffPolicyIterationReinforcer.roll_out_and_store
def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ self.model.train() if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() else: frames = 0 episode_infos = [] with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) new_frames = rollout.frames() frames += new_frames episode_infos.extend(rollout.episode_information()) pbar.update(new_frames) # Store some information about the rollout, no training phase batch_info['frames'] = frames batch_info['episode_infos'] = episode_infos
python
def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ self.model.train() if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() else: frames = 0 episode_infos = [] with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) new_frames = rollout.frames() frames += new_frames episode_infos.extend(rollout.episode_information()) pbar.update(new_frames) # Store some information about the rollout, no training phase batch_info['frames'] = frames batch_info['episode_infos'] = episode_infos
['def', 'roll_out_and_store', '(', 'self', ',', 'batch_info', ')', ':', 'self', '.', 'model', '.', 'train', '(', ')', 'if', 'self', '.', 'env_roller', '.', 'is_ready_for_sampling', '(', ')', ':', 'rollout', '=', 'self', '.', 'env_roller', '.', 'rollout', '(', 'batch_info', ',', 'self', '.', 'model', ',', 'self', '.', 'settings', '.', 'rollout_steps', ')', '.', 'to_device', '(', 'self', '.', 'device', ')', '# Store some information about the rollout, no training phase', 'batch_info', '[', "'frames'", ']', '=', 'rollout', '.', 'frames', '(', ')', 'batch_info', '[', "'episode_infos'", ']', '=', 'rollout', '.', 'episode_information', '(', ')', 'else', ':', 'frames', '=', '0', 'episode_infos', '=', '[', ']', 'with', 'tqdm', '.', 'tqdm', '(', 'desc', '=', '"Populating memory"', ',', 'total', '=', 'self', '.', 'env_roller', '.', 'initial_memory_size_hint', '(', ')', ')', 'as', 'pbar', ':', 'while', 'not', 'self', '.', 'env_roller', '.', 'is_ready_for_sampling', '(', ')', ':', 'rollout', '=', 'self', '.', 'env_roller', '.', 'rollout', '(', 'batch_info', ',', 'self', '.', 'model', ',', 'self', '.', 'settings', '.', 'rollout_steps', ')', '.', 'to_device', '(', 'self', '.', 'device', ')', 'new_frames', '=', 'rollout', '.', 'frames', '(', ')', 'frames', '+=', 'new_frames', 'episode_infos', '.', 'extend', '(', 'rollout', '.', 'episode_information', '(', ')', ')', 'pbar', '.', 'update', '(', 'new_frames', ')', '# Store some information about the rollout, no training phase', 'batch_info', '[', "'frames'", ']', '=', 'frames', 'batch_info', '[', "'episode_infos'", ']', '=', 'episode_infos']
Roll out environment and store result in the replay buffer
['Roll', 'out', 'environment', 'and', 'store', 'result', 'in', 'the', 'replay', 'buffer']
train
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py#L109-L135
5,366
GNS3/gns3-server
gns3server/compute/vpcs/__init__.py
VPCS.close_node
def close_node(self, node_id, *args, **kwargs):
    """
    Closes a VPCS VM.

    :returns: VPCSVM instance
    """

    node = self.get_node(node_id)
    if node_id in self._used_mac_ids:
        i = self._used_mac_ids[node_id]
        self._free_mac_ids[node.project.id].insert(0, i)
        del self._used_mac_ids[node_id]
    yield from super().close_node(node_id, *args, **kwargs)
    return node
python
def close_node(self, node_id, *args, **kwargs): """ Closes a VPCS VM. :returns: VPCSVM instance """ node = self.get_node(node_id) if node_id in self._used_mac_ids: i = self._used_mac_ids[node_id] self._free_mac_ids[node.project.id].insert(0, i) del self._used_mac_ids[node_id] yield from super().close_node(node_id, *args, **kwargs) return node
['def', 'close_node', '(', 'self', ',', 'node_id', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'node', '=', 'self', '.', 'get_node', '(', 'node_id', ')', 'if', 'node_id', 'in', 'self', '.', '_used_mac_ids', ':', 'i', '=', 'self', '.', '_used_mac_ids', '[', 'node_id', ']', 'self', '.', '_free_mac_ids', '[', 'node', '.', 'project', '.', 'id', ']', '.', 'insert', '(', '0', ',', 'i', ')', 'del', 'self', '.', '_used_mac_ids', '[', 'node_id', ']', 'yield', 'from', 'super', '(', ')', '.', 'close_node', '(', 'node_id', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'node']
Closes a VPCS VM. :returns: VPCSVM instance
['Closes', 'a', 'VPCS', 'VM', '.']
train
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/__init__.py#L57-L70
5,367
reichlab/pymmwr
pymmwr.py
date_to_epiweek
def date_to_epiweek(date=datetime.date.today()) -> Epiweek:
    """
    Convert python date to Epiweek
    """

    year = date.year

    start_dates = list(map(_start_date_of_year, [year - 1, year, year + 1]))
    start_date = start_dates[1]
    if start_dates[1] > date:
        start_date = start_dates[0]
    elif date >= start_dates[2]:
        start_date = start_dates[2]

    return Epiweek(
        year=(start_date + datetime.timedelta(days=7)).year,
        week=((date - start_date).days // 7) + 1,
        day=(date.isoweekday() % 7) + 1
    )
python
def date_to_epiweek(date=datetime.date.today()) -> Epiweek: """ Convert python date to Epiweek """ year = date.year start_dates = list(map(_start_date_of_year, [year - 1, year, year + 1])) start_date = start_dates[1] if start_dates[1] > date: start_date = start_dates[0] elif date >= start_dates[2]: start_date = start_dates[2] return Epiweek( year=(start_date + datetime.timedelta(days=7)).year, week=((date - start_date).days // 7) + 1, day=(date.isoweekday() % 7) + 1 )
['def', 'date_to_epiweek', '(', 'date', '=', 'datetime', '.', 'date', '.', 'today', '(', ')', ')', '->', 'Epiweek', ':', 'year', '=', 'date', '.', 'year', 'start_dates', '=', 'list', '(', 'map', '(', '_start_date_of_year', ',', '[', 'year', '-', '1', ',', 'year', ',', 'year', '+', '1', ']', ')', ')', 'start_date', '=', 'start_dates', '[', '1', ']', 'if', 'start_dates', '[', '1', ']', '>', 'date', ':', 'start_date', '=', 'start_dates', '[', '0', ']', 'elif', 'date', '>=', 'start_dates', '[', '2', ']', ':', 'start_date', '=', 'start_dates', '[', '2', ']', 'return', 'Epiweek', '(', 'year', '=', '(', 'start_date', '+', 'datetime', '.', 'timedelta', '(', 'days', '=', '7', ')', ')', '.', 'year', ',', 'week', '=', '(', '(', 'date', '-', 'start_date', ')', '.', 'days', '//', '7', ')', '+', '1', ',', 'day', '=', '(', 'date', '.', 'isoweekday', '(', ')', '%', '7', ')', '+', '1', ')']
Convert python date to Epiweek
['Convert', 'python', 'date', 'to', 'Epiweek']
train
https://github.com/reichlab/pymmwr/blob/98216bd5081998ca63db89003c4ef397fe905755/pymmwr.py#L62-L80
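As a quick illustration of the record above, the following is a minimal usage sketch. It assumes the pymmwr package from the linked repository is installed; since the function lives in pymmwr.py, it is reachable as pymmwr.date_to_epiweek. The chosen date is arbitrary.

import datetime
import pymmwr  # assumed installed from the repository linked above

# Convert a calendar date into an MMWR epidemiological week.
ew = pymmwr.date_to_epiweek(datetime.date(2015, 10, 3))
print(ew)  # an Epiweek(year=..., week=..., day=...) result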
5,368
inasafe/inasafe
safe/gui/widgets/message_viewer.py
MessageViewer.impact_path
def impact_path(self, value):
    """Setter to impact path.

    :param value: The impact path.
    :type value: str
    """

    self._impact_path = value
    if value is None:
        self.action_show_report.setEnabled(False)
        self.action_show_log.setEnabled(False)
        self.report_path = None
        self.log_path = None
    else:
        self.action_show_report.setEnabled(True)
        self.action_show_log.setEnabled(True)
        self.log_path = '%s.log.html' % self.impact_path
        self.report_path = '%s.report.html' % self.impact_path
        self.save_report_to_html()
        self.save_log_to_html()
        self.show_report()
python
def impact_path(self, value): """Setter to impact path. :param value: The impact path. :type value: str """ self._impact_path = value if value is None: self.action_show_report.setEnabled(False) self.action_show_log.setEnabled(False) self.report_path = None self.log_path = None else: self.action_show_report.setEnabled(True) self.action_show_log.setEnabled(True) self.log_path = '%s.log.html' % self.impact_path self.report_path = '%s.report.html' % self.impact_path self.save_report_to_html() self.save_log_to_html() self.show_report()
['def', 'impact_path', '(', 'self', ',', 'value', ')', ':', 'self', '.', '_impact_path', '=', 'value', 'if', 'value', 'is', 'None', ':', 'self', '.', 'action_show_report', '.', 'setEnabled', '(', 'False', ')', 'self', '.', 'action_show_log', '.', 'setEnabled', '(', 'False', ')', 'self', '.', 'report_path', '=', 'None', 'self', '.', 'log_path', '=', 'None', 'else', ':', 'self', '.', 'action_show_report', '.', 'setEnabled', '(', 'True', ')', 'self', '.', 'action_show_log', '.', 'setEnabled', '(', 'True', ')', 'self', '.', 'log_path', '=', "'%s.log.html'", '%', 'self', '.', 'impact_path', 'self', '.', 'report_path', '=', "'%s.report.html'", '%', 'self', '.', 'impact_path', 'self', '.', 'save_report_to_html', '(', ')', 'self', '.', 'save_log_to_html', '(', ')', 'self', '.', 'show_report', '(', ')']
Setter to impact path. :param value: The impact path. :type value: str
['Setter', 'to', 'impact', 'path', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/message_viewer.py#L103-L123
5,369
anayjoshi/cronus
cronus/beat.py
set_rate
def set_rate(rate):
    """Defines the ideal rate at which computation is to be performed

    :arg rate: the frequency in Hertz
    :type rate: int or float
    :raises: TypeError: if argument 'rate' is not int or float
    """
    if not (isinstance(rate, int) or isinstance(rate, float)):
        raise TypeError("argument to set_rate is expected to be int or float")
    global loop_duration
    loop_duration = 1.0/rate
python
def set_rate(rate): """Defines the ideal rate at which computation is to be performed :arg rate: the frequency in Hertz :type rate: int or float :raises: TypeError: if argument 'rate' is not int or float """ if not (isinstance(rate, int) or isinstance(rate, float)): raise TypeError("argument to set_rate is expected to be int or float") global loop_duration loop_duration = 1.0/rate
['def', 'set_rate', '(', 'rate', ')', ':', 'if', 'not', '(', 'isinstance', '(', 'rate', ',', 'int', ')', 'or', 'isinstance', '(', 'rate', ',', 'float', ')', ')', ':', 'raise', 'TypeError', '(', '"argument to set_rate is expected to be int or float"', ')', 'global', 'loop_duration', 'loop_duration', '=', '1.0', '/', 'rate']
Defines the ideal rate at which computation is to be performed :arg rate: the frequency in Hertz :type rate: int or float :raises: TypeError: if argument 'rate' is not int or float
['Defines', 'the', 'ideal', 'rate', 'at', 'which', 'computation', 'is', 'to', 'be', 'performed']
train
https://github.com/anayjoshi/cronus/blob/52544e63913f37d7fca570168b878737f16fe39c/cronus/beat.py#L13-L24
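A small self-contained sketch of the pacing idea behind set_rate above: a rate in Hertz becomes a loop duration, and each loop iteration sleeps away whatever time is left. The names rate_hz and loop_duration are illustrative only, not part of the cronus API.

import time

rate_hz = 10.0
loop_duration = 1.0 / rate_hz  # same conversion as set_rate above

for _ in range(3):
    start = time.time()
    # ... do one unit of work here ...
    elapsed = time.time() - start
    if elapsed < loop_duration:
        time.sleep(loop_duration - elapsed)  # keep the loop close to 10 Hz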
5,370
gr33ndata/dysl
dysl/social.py
SocialLM.classify
def classify(self, text=u''):
    """ Predicts the Language of a given text.

    :param text: Unicode text to be classified.
    """
    result = self.calculate(doc_terms=self.tokenize(text))
    #return (result['calc_id'], result)
    return (result['calc_id'], self.karbasa(result))
python
def classify(self, text=u''): """ Predicts the Language of a given text. :param text: Unicode text to be classified. """ result = self.calculate(doc_terms=self.tokenize(text)) #return (result['calc_id'], result) return (result['calc_id'], self.karbasa(result))
['def', 'classify', '(', 'self', ',', 'text', '=', "u''", ')', ':', 'result', '=', 'self', '.', 'calculate', '(', 'doc_terms', '=', 'self', '.', 'tokenize', '(', 'text', ')', ')', "#return (result['calc_id'], result)", 'return', '(', 'result', '[', "'calc_id'", ']', ',', 'self', '.', 'karbasa', '(', 'result', ')', ')']
Predicts the Language of a given text. :param text: Unicode text to be classified.
['Predicts', 'the', 'Language', 'of', 'a', 'given', 'text', '.']
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L37-L44
5,371
limix/limix-core
limix_core/mean/mean_base.py
MeanBase.W
def W(self,value):
    """ set fixed effect design """
    if value is None:
        value = sp.zeros((self._N, 0))
    assert value.shape[0]==self._N, 'Dimension mismatch'
    self._K = value.shape[1]
    self._W = value
    self._notify()
    self.clear_cache('predict_in_sample','Yres')
python
def W(self,value): """ set fixed effect design """ if value is None: value = sp.zeros((self._N, 0)) assert value.shape[0]==self._N, 'Dimension mismatch' self._K = value.shape[1] self._W = value self._notify() self.clear_cache('predict_in_sample','Yres')
['def', 'W', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'value', '=', 'sp', '.', 'zeros', '(', '(', 'self', '.', '_N', ',', '0', ')', ')', 'assert', 'value', '.', 'shape', '[', '0', ']', '==', 'self', '.', '_N', ',', "'Dimension mismatch'", 'self', '.', '_K', '=', 'value', '.', 'shape', '[', '1', ']', 'self', '.', '_W', '=', 'value', 'self', '.', '_notify', '(', ')', 'self', '.', 'clear_cache', '(', "'predict_in_sample'", ',', "'Yres'", ')']
set fixed effect design
['set', 'fixed', 'effect', 'design']
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/mean_base.py#L102-L109
5,372
saulpw/visidata
visidata/canvas.py
Canvas.render_sync
def render_sync(self):
    'plots points and lines and text onto the Plotter'

    self.setZoom()
    bb = self.visibleBox
    xmin, ymin, xmax, ymax = bb.xmin, bb.ymin, bb.xmax, bb.ymax
    xfactor, yfactor = self.xScaler, self.yScaler
    plotxmin, plotymin = self.plotviewBox.xmin, self.plotviewBox.ymin

    for vertexes, attr, row in Progress(self.polylines, 'rendering'):
        if len(vertexes) == 1:  # single point
            x1, y1 = vertexes[0]
            x1, y1 = float(x1), float(y1)
            if xmin <= x1 <= xmax and ymin <= y1 <= ymax:
                x = plotxmin+(x1-xmin)*xfactor
                y = plotymin+(y1-ymin)*yfactor
                self.plotpixel(round(x), round(y), attr, row)
            continue

        prev_x, prev_y = vertexes[0]
        for x, y in vertexes[1:]:
            r = clipline(prev_x, prev_y, x, y, xmin, ymin, xmax, ymax)
            if r:
                x1, y1, x2, y2 = r
                x1 = plotxmin+float(x1-xmin)*xfactor
                y1 = plotymin+float(y1-ymin)*yfactor
                x2 = plotxmin+float(x2-xmin)*xfactor
                y2 = plotymin+float(y2-ymin)*yfactor
                self.plotline(x1, y1, x2, y2, attr, row)
            prev_x, prev_y = x, y

    for x, y, text, attr, row in Progress(self.gridlabels, 'labeling'):
        self.plotlabel(self.scaleX(x), self.scaleY(y), text, attr, row)
python
def render_sync(self): 'plots points and lines and text onto the Plotter' self.setZoom() bb = self.visibleBox xmin, ymin, xmax, ymax = bb.xmin, bb.ymin, bb.xmax, bb.ymax xfactor, yfactor = self.xScaler, self.yScaler plotxmin, plotymin = self.plotviewBox.xmin, self.plotviewBox.ymin for vertexes, attr, row in Progress(self.polylines, 'rendering'): if len(vertexes) == 1: # single point x1, y1 = vertexes[0] x1, y1 = float(x1), float(y1) if xmin <= x1 <= xmax and ymin <= y1 <= ymax: x = plotxmin+(x1-xmin)*xfactor y = plotymin+(y1-ymin)*yfactor self.plotpixel(round(x), round(y), attr, row) continue prev_x, prev_y = vertexes[0] for x, y in vertexes[1:]: r = clipline(prev_x, prev_y, x, y, xmin, ymin, xmax, ymax) if r: x1, y1, x2, y2 = r x1 = plotxmin+float(x1-xmin)*xfactor y1 = plotymin+float(y1-ymin)*yfactor x2 = plotxmin+float(x2-xmin)*xfactor y2 = plotymin+float(y2-ymin)*yfactor self.plotline(x1, y1, x2, y2, attr, row) prev_x, prev_y = x, y for x, y, text, attr, row in Progress(self.gridlabels, 'labeling'): self.plotlabel(self.scaleX(x), self.scaleY(y), text, attr, row)
['def', 'render_sync', '(', 'self', ')', ':', 'self', '.', 'setZoom', '(', ')', 'bb', '=', 'self', '.', 'visibleBox', 'xmin', ',', 'ymin', ',', 'xmax', ',', 'ymax', '=', 'bb', '.', 'xmin', ',', 'bb', '.', 'ymin', ',', 'bb', '.', 'xmax', ',', 'bb', '.', 'ymax', 'xfactor', ',', 'yfactor', '=', 'self', '.', 'xScaler', ',', 'self', '.', 'yScaler', 'plotxmin', ',', 'plotymin', '=', 'self', '.', 'plotviewBox', '.', 'xmin', ',', 'self', '.', 'plotviewBox', '.', 'ymin', 'for', 'vertexes', ',', 'attr', ',', 'row', 'in', 'Progress', '(', 'self', '.', 'polylines', ',', "'rendering'", ')', ':', 'if', 'len', '(', 'vertexes', ')', '==', '1', ':', '# single point', 'x1', ',', 'y1', '=', 'vertexes', '[', '0', ']', 'x1', ',', 'y1', '=', 'float', '(', 'x1', ')', ',', 'float', '(', 'y1', ')', 'if', 'xmin', '<=', 'x1', '<=', 'xmax', 'and', 'ymin', '<=', 'y1', '<=', 'ymax', ':', 'x', '=', 'plotxmin', '+', '(', 'x1', '-', 'xmin', ')', '*', 'xfactor', 'y', '=', 'plotymin', '+', '(', 'y1', '-', 'ymin', ')', '*', 'yfactor', 'self', '.', 'plotpixel', '(', 'round', '(', 'x', ')', ',', 'round', '(', 'y', ')', ',', 'attr', ',', 'row', ')', 'continue', 'prev_x', ',', 'prev_y', '=', 'vertexes', '[', '0', ']', 'for', 'x', ',', 'y', 'in', 'vertexes', '[', '1', ':', ']', ':', 'r', '=', 'clipline', '(', 'prev_x', ',', 'prev_y', ',', 'x', ',', 'y', ',', 'xmin', ',', 'ymin', ',', 'xmax', ',', 'ymax', ')', 'if', 'r', ':', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', '=', 'r', 'x1', '=', 'plotxmin', '+', 'float', '(', 'x1', '-', 'xmin', ')', '*', 'xfactor', 'y1', '=', 'plotymin', '+', 'float', '(', 'y1', '-', 'ymin', ')', '*', 'yfactor', 'x2', '=', 'plotxmin', '+', 'float', '(', 'x2', '-', 'xmin', ')', '*', 'xfactor', 'y2', '=', 'plotymin', '+', 'float', '(', 'y2', '-', 'ymin', ')', '*', 'yfactor', 'self', '.', 'plotline', '(', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', ',', 'attr', ',', 'row', ')', 'prev_x', ',', 'prev_y', '=', 'x', ',', 'y', 'for', 'x', ',', 'y', ',', 'text', ',', 'attr', ',', 'row', 'in', 'Progress', '(', 'self', '.', 'gridlabels', ',', "'labeling'", ')', ':', 'self', '.', 'plotlabel', '(', 'self', '.', 'scaleX', '(', 'x', ')', ',', 'self', '.', 'scaleY', '(', 'y', ')', ',', 'text', ',', 'attr', ',', 'row', ')']
plots points and lines and text onto the Plotter
['plots', 'points', 'and', 'lines', 'and', 'text', 'onto', 'the', 'Plotter']
train
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L594-L626
5,373
ewels/MultiQC
multiqc/modules/featureCounts/feature_counts.py
MultiqcModule.parse_featurecounts_report
def parse_featurecounts_report (self, f):
    """ Parse the featureCounts log file. """

    file_names = list()
    parsed_data = dict()
    for l in f['f'].splitlines():
        thisrow = list()
        s = l.split("\t")
        if len(s) < 2:
            continue
        if s[0] == 'Status':
            for f_name in s[1:]:
                file_names.append(f_name)
        else:
            k = s[0]
            if k not in self.featurecounts_keys:
                self.featurecounts_keys.append(k)
            for val in s[1:]:
                try:
                    thisrow.append(int(val))
                except ValueError:
                    pass
        if len(thisrow) > 0:
            parsed_data[k] = thisrow

    # Check that this actually is a featureCounts file, as format and parsing is quite general
    if 'Assigned' not in parsed_data.keys():
        return None

    for idx, f_name in enumerate(file_names):

        # Clean up sample name
        s_name = self.clean_s_name(f_name, f['root'])

        # Reorganised parsed data for this sample
        # Collect total count number
        data = dict()
        data['Total'] = 0
        for k in parsed_data:
            data[k] = parsed_data[k][idx]
            data['Total'] += parsed_data[k][idx]

        # Calculate the percent aligned if we can
        try:
            data['percent_assigned'] = (float(data['Assigned'])/float(data['Total'])) * 100.0
        except (KeyError, ZeroDivisionError):
            pass

        # Add to the main dictionary
        if len(data) > 1:
            if s_name in self.featurecounts_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
            self.add_data_source(f, s_name)
            self.featurecounts_data[s_name] = data
python
def parse_featurecounts_report (self, f): """ Parse the featureCounts log file. """ file_names = list() parsed_data = dict() for l in f['f'].splitlines(): thisrow = list() s = l.split("\t") if len(s) < 2: continue if s[0] == 'Status': for f_name in s[1:]: file_names.append(f_name) else: k = s[0] if k not in self.featurecounts_keys: self.featurecounts_keys.append(k) for val in s[1:]: try: thisrow.append(int(val)) except ValueError: pass if len(thisrow) > 0: parsed_data[k] = thisrow # Check that this actually is a featureCounts file, as format and parsing is quite general if 'Assigned' not in parsed_data.keys(): return None for idx, f_name in enumerate(file_names): # Clean up sample name s_name = self.clean_s_name(f_name, f['root']) # Reorganised parsed data for this sample # Collect total count number data = dict() data['Total'] = 0 for k in parsed_data: data[k] = parsed_data[k][idx] data['Total'] += parsed_data[k][idx] # Calculate the percent aligned if we can try: data['percent_assigned'] = (float(data['Assigned'])/float(data['Total'])) * 100.0 except (KeyError, ZeroDivisionError): pass # Add to the main dictionary if len(data) > 1: if s_name in self.featurecounts_data: log.debug("Duplicate sample name found! Overwriting: {}".format(s_name)) self.add_data_source(f, s_name) self.featurecounts_data[s_name] = data
['def', 'parse_featurecounts_report', '(', 'self', ',', 'f', ')', ':', 'file_names', '=', 'list', '(', ')', 'parsed_data', '=', 'dict', '(', ')', 'for', 'l', 'in', 'f', '[', "'f'", ']', '.', 'splitlines', '(', ')', ':', 'thisrow', '=', 'list', '(', ')', 's', '=', 'l', '.', 'split', '(', '"\\t"', ')', 'if', 'len', '(', 's', ')', '<', '2', ':', 'continue', 'if', 's', '[', '0', ']', '==', "'Status'", ':', 'for', 'f_name', 'in', 's', '[', '1', ':', ']', ':', 'file_names', '.', 'append', '(', 'f_name', ')', 'else', ':', 'k', '=', 's', '[', '0', ']', 'if', 'k', 'not', 'in', 'self', '.', 'featurecounts_keys', ':', 'self', '.', 'featurecounts_keys', '.', 'append', '(', 'k', ')', 'for', 'val', 'in', 's', '[', '1', ':', ']', ':', 'try', ':', 'thisrow', '.', 'append', '(', 'int', '(', 'val', ')', ')', 'except', 'ValueError', ':', 'pass', 'if', 'len', '(', 'thisrow', ')', '>', '0', ':', 'parsed_data', '[', 'k', ']', '=', 'thisrow', '# Check that this actually is a featureCounts file, as format and parsing is quite general', 'if', "'Assigned'", 'not', 'in', 'parsed_data', '.', 'keys', '(', ')', ':', 'return', 'None', 'for', 'idx', ',', 'f_name', 'in', 'enumerate', '(', 'file_names', ')', ':', '# Clean up sample name', 's_name', '=', 'self', '.', 'clean_s_name', '(', 'f_name', ',', 'f', '[', "'root'", ']', ')', '# Reorganised parsed data for this sample', '# Collect total count number', 'data', '=', 'dict', '(', ')', 'data', '[', "'Total'", ']', '=', '0', 'for', 'k', 'in', 'parsed_data', ':', 'data', '[', 'k', ']', '=', 'parsed_data', '[', 'k', ']', '[', 'idx', ']', 'data', '[', "'Total'", ']', '+=', 'parsed_data', '[', 'k', ']', '[', 'idx', ']', '# Calculate the percent aligned if we can', 'try', ':', 'data', '[', "'percent_assigned'", ']', '=', '(', 'float', '(', 'data', '[', "'Assigned'", ']', ')', '/', 'float', '(', 'data', '[', "'Total'", ']', ')', ')', '*', '100.0', 'except', '(', 'KeyError', ',', 'ZeroDivisionError', ')', ':', 'pass', '# Add to the main dictionary', 'if', 'len', '(', 'data', ')', '>', '1', ':', 'if', 's_name', 'in', 'self', '.', 'featurecounts_data', ':', 'log', '.', 'debug', '(', '"Duplicate sample name found! Overwriting: {}"', '.', 'format', '(', 's_name', ')', ')', 'self', '.', 'add_data_source', '(', 'f', ',', 's_name', ')', 'self', '.', 'featurecounts_data', '[', 's_name', ']', '=', 'data']
Parse the featureCounts log file.
['Parse', 'the', 'featureCounts', 'log', 'file', '.']
train
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/featureCounts/feature_counts.py#L52-L103
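For context on the kind of input the parser above handles, here is a hedged standalone sketch: a tab-separated summary with a 'Status' header row of sample names and integer counts per category. The sample text and variable names are invented for illustration and are not taken from MultiQC.

summary = (
    "Status\tsample1.bam\tsample2.bam\n"
    "Assigned\t900\t800\n"
    "Unassigned_Ambiguity\t50\t60\n"
    "Unassigned_NoFeatures\t50\t140\n"
)

file_names, parsed = [], {}
for line in summary.splitlines():
    cols = line.split("\t")
    if cols[0] == "Status":
        file_names = cols[1:]          # sample names from the header row
    else:
        parsed[cols[0]] = [int(v) for v in cols[1:]]

for idx, name in enumerate(file_names):
    total = sum(rows[idx] for rows in parsed.values())
    pct = 100.0 * parsed["Assigned"][idx] / total
    print(name, total, round(pct, 1))  # e.g. sample1.bam 1000 90.0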
5,374
noahbenson/pimms
pimms/util.py
lazy_map
def lazy_map(initial={}, pre_size=0):
    '''
    lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
    '''
    if is_lazy_map(initial): return initial
    if not initial: return _EMPTY_LMAP
    return _lazy_turbo_mapping(initial, pre_size)
python
def lazy_map(initial={}, pre_size=0): ''' lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps. ''' if is_lazy_map(initial): return initial if not initial: return _EMPTY_LMAP return _lazy_turbo_mapping(initial, pre_size)
['def', 'lazy_map', '(', 'initial', '=', '{', '}', ',', 'pre_size', '=', '0', ')', ':', 'if', 'is_lazy_map', '(', 'initial', ')', ':', 'return', 'initial', 'if', 'not', 'initial', ':', 'return', '_EMPTY_LMAP', 'return', '_lazy_turbo_mapping', '(', 'initial', ',', 'pre_size', ')']
lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
['lazy_map', 'is', 'a', 'blatant', 'copy', 'of', 'the', 'pyrsistent', '.', 'pmap', 'function', 'and', 'is', 'used', 'to', 'create', 'lazy', 'maps', '.']
train
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L702-L708
5,375
ohenrik/tabs
tabs/tabs.py
Tabs.get
def get(self, table_name):
    """Load table class by name, class not yet initialized"""
    assert table_name in self.tabs, \
        "Table not avaiable. Avaiable tables: {}".format(
            ", ".join(self.tabs.keys())
        )
    return self.tabs[table_name]
python
def get(self, table_name): """Load table class by name, class not yet initialized""" assert table_name in self.tabs, \ "Table not avaiable. Avaiable tables: {}".format( ", ".join(self.tabs.keys()) ) return self.tabs[table_name]
['def', 'get', '(', 'self', ',', 'table_name', ')', ':', 'assert', 'table_name', 'in', 'self', '.', 'tabs', ',', '"Table not avaiable. Avaiable tables: {}"', '.', 'format', '(', '", "', '.', 'join', '(', 'self', '.', 'tabs', '.', 'keys', '(', ')', ')', ')', 'return', 'self', '.', 'tabs', '[', 'table_name', ']']
Load table class by name, class not yet initialized
['Load', 'table', 'class', 'by', 'name', 'class', 'not', 'yet', 'initialized']
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tabs.py#L70-L76
5,376
jaraco/irc
irc/client.py
Reactor.process_once
def process_once(self, timeout=0):
    """Process data from connections once.

    Arguments:

        timeout -- How long the select() call should wait if no data is
                   available.

    This method should be called periodically to check and process
    incoming data, if there are any. If that seems boring, look at the
    process_forever method.
    """
    log.log(logging.DEBUG - 2, "process_once()")
    sockets = self.sockets
    if sockets:
        in_, out, err = select.select(sockets, [], [], timeout)
        self.process_data(in_)
    else:
        time.sleep(timeout)
    self.process_timeout()
python
def process_once(self, timeout=0): """Process data from connections once. Arguments: timeout -- How long the select() call should wait if no data is available. This method should be called periodically to check and process incoming data, if there are any. If that seems boring, look at the process_forever method. """ log.log(logging.DEBUG - 2, "process_once()") sockets = self.sockets if sockets: in_, out, err = select.select(sockets, [], [], timeout) self.process_data(in_) else: time.sleep(timeout) self.process_timeout()
['def', 'process_once', '(', 'self', ',', 'timeout', '=', '0', ')', ':', 'log', '.', 'log', '(', 'logging', '.', 'DEBUG', '-', '2', ',', '"process_once()"', ')', 'sockets', '=', 'self', '.', 'sockets', 'if', 'sockets', ':', 'in_', ',', 'out', ',', 'err', '=', 'select', '.', 'select', '(', 'sockets', ',', '[', ']', ',', '[', ']', ',', 'timeout', ')', 'self', '.', 'process_data', '(', 'in_', ')', 'else', ':', 'time', '.', 'sleep', '(', 'timeout', ')', 'self', '.', 'process_timeout', '(', ')']
Process data from connections once. Arguments: timeout -- How long the select() call should wait if no data is available. This method should be called periodically to check and process incoming data, if there are any. If that seems boring, look at the process_forever method.
['Process', 'data', 'from', 'connections', 'once', '.']
train
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/client.py#L807-L826
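The select-then-process shape used by process_once above can be demonstrated standalone with a socket pair; this is only a sketch of the pattern, not the irc library itself.

import select
import socket

a, b = socket.socketpair()
a.send(b"hello")

# Wait up to 0.1s for readable sockets, then drain them -- the same
# select-then-process structure as process_once above.
readable, _, _ = select.select([b], [], [], 0.1)
for sock in readable:
    print(sock.recv(1024))  # b'hello'

a.close()
b.close()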
5,377
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/graph.py
Graph._add_node
def _add_node(self, node):
    """Add a new node to node_list and give the node an ID.

    Args:
        node: An instance of Node.

    Returns:
        node_id: An integer.
    """
    node_id = len(self.node_list)
    self.node_to_id[node] = node_id
    self.node_list.append(node)
    self.adj_list[node_id] = []
    self.reverse_adj_list[node_id] = []
    return node_id
python
def _add_node(self, node): """Add a new node to node_list and give the node an ID. Args: node: An instance of Node. Returns: node_id: An integer. """ node_id = len(self.node_list) self.node_to_id[node] = node_id self.node_list.append(node) self.adj_list[node_id] = [] self.reverse_adj_list[node_id] = [] return node_id
['def', '_add_node', '(', 'self', ',', 'node', ')', ':', 'node_id', '=', 'len', '(', 'self', '.', 'node_list', ')', 'self', '.', 'node_to_id', '[', 'node', ']', '=', 'node_id', 'self', '.', 'node_list', '.', 'append', '(', 'node', ')', 'self', '.', 'adj_list', '[', 'node_id', ']', '=', '[', ']', 'self', '.', 'reverse_adj_list', '[', 'node_id', ']', '=', '[', ']', 'return', 'node_id']
Add a new node to node_list and give the node an ID. Args: node: An instance of Node. Returns: node_id: An integer.
['Add', 'a', 'new', 'node', 'to', 'node_list', 'and', 'give', 'the', 'node', 'an', 'ID', '.', 'Args', ':', 'node', ':', 'An', 'instance', 'of', 'Node', '.', 'Returns', ':', 'node_id', ':', 'An', 'integer', '.']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/graph.py#L200-L212
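A minimal standalone sketch of the bookkeeping _add_node performs above: sequential integer IDs plus forward and reverse adjacency lists. The MiniGraph class is illustrative only, not the nni Graph API.

class MiniGraph:
    def __init__(self):
        self.node_list = []
        self.node_to_id = {}
        self.adj_list = {}
        self.reverse_adj_list = {}

    def add_node(self, node):
        # The ID is simply the current length of node_list.
        node_id = len(self.node_list)
        self.node_to_id[node] = node_id
        self.node_list.append(node)
        self.adj_list[node_id] = []
        self.reverse_adj_list[node_id] = []
        return node_id

g = MiniGraph()
print(g.add_node("input"), g.add_node("conv"))  # 0 1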
5,378
jut-io/jut-python-tools
jut/api/data_engine.py
run
def run(juttle,
        deployment_name,
        program_name=None,
        persist=False,
        token_manager=None,
        app_url=defaults.APP_URL):
    """
    run a juttle program through the juttle streaming API and return the
    various events that are part of running a Juttle program which include:

     * Initial job status details including information to associate
       multiple flowgraphs with their individual outputs (sinks):

        {
            "status": "ok",
            "job": {
                "channel_id": "56bde5f0",
                "_start_time": "2015-10-03T06:59:49.233Z",
                "alias": "jut-tools program 1443855588",
                "_ms_begin": 1443855589233,
                "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510",
                "timeout": 5,
                "id": "b973bce6"
            },
            "now": "2015-10-03T06:59:49.230Z",
            "stats": ...
            "sinks": [
                {
                    "location": {
                        "start": {
                            "column": 17,
                            "line": 1,
                            "offset": 16
                        },
                        "end": {
                            "column": 24,
                            "line": 1,
                            "offset": 23
                        },
                        "filename": "main"
                    },
                    "name": "table",
                    "channel": "sink237",
                    "options": {
                        "_jut_time_bounds": []
                    }
                },
                ... as many sinks as there are flowgrpahs in your program
            ]
        }

     * Each set of points returned along with the indication of which sink
       they belong to:

        {
            "points": [ array of points ],
            "sink": sink_id
        }

     * Error event indicating where in your program the error occurred

        {
            "error": true,
            payload with "info" and "context" explaining exact error
        }

     * Warning event indicating where in your program the error occurred

        {
            "warning": true,
            payload with "info" and "context" explaining exact warning
        }

     * ...

    juttle: juttle program to execute
    deployment_name: the deployment name to execute the program on
    persist: if set to True then we won't wait for response data and will
             disconnect from the websocket leaving the program running in
             the background if it is uses a background output
             (http://docs.jut.io/juttle-guide/#background_outputs) and
             therefore becomes a persistent job.
    token_manager: auth.TokenManager object
    app_url: optional argument used primarily for internal Jut testing
    """
    headers = token_manager.get_access_token_headers()
    data_url = get_juttle_data_url(deployment_name,
                                   app_url=app_url,
                                   token_manager=token_manager)

    websocket = __wss_connect(data_url, token_manager)

    data = websocket.recv()
    channel_id_obj = json.loads(data)

    if is_debug_enabled():
        debug('got channel response %s', json.dumps(channel_id_obj))

    channel_id = channel_id_obj['channel_id']
    juttle_job = {
        'channel_id': channel_id,
        'alias': program_name,
        'program': juttle
    }

    response = requests.post('%s/api/v1/jobs' % data_url,
                             data=json.dumps(juttle_job),
                             headers=headers)

    if response.status_code != 200:
        yield {
            "error": True,
            "context": response.json()
        }
        return

    job_info = response.json()
    # yield job_info so the caller to this method can figure out which sinks
    # correlate to which flowgraphs
    yield job_info
    job_id = job_info['job']['id']

    if is_debug_enabled():
        debug('started job %s', json.dumps(job_info))

    for data in connect_job(job_id,
                            deployment_name,
                            token_manager=token_manager,
                            app_url=app_url,
                            persist=persist,
                            websocket=websocket,
                            data_url=data_url):
        yield data
python
def run(juttle, deployment_name, program_name=None, persist=False, token_manager=None, app_url=defaults.APP_URL): """ run a juttle program through the juttle streaming API and return the various events that are part of running a Juttle program which include: * Initial job status details including information to associate multiple flowgraphs with their individual outputs (sinks): { "status": "ok", "job": { "channel_id": "56bde5f0", "_start_time": "2015-10-03T06:59:49.233Z", "alias": "jut-tools program 1443855588", "_ms_begin": 1443855589233, "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510", "timeout": 5, "id": "b973bce6" }, "now": "2015-10-03T06:59:49.230Z", "stats": ... "sinks": [ { "location": { "start": { "column": 17, "line": 1, "offset": 16 }, "end": { "column": 24, "line": 1, "offset": 23 }, "filename": "main" }, "name": "table", "channel": "sink237", "options": { "_jut_time_bounds": [] } }, ... as many sinks as there are flowgrpahs in your program ] } * Each set of points returned along with the indication of which sink they belong to: { "points": [ array of points ], "sink": sink_id } * Error event indicating where in your program the error occurred { "error": true, payload with "info" and "context" explaining exact error } * Warning event indicating where in your program the error occurred { "warning": true, payload with "info" and "context" explaining exact warning } * ... juttle: juttle program to execute deployment_name: the deployment name to execute the program on persist: if set to True then we won't wait for response data and will disconnect from the websocket leaving the program running in the background if it is uses a background output (http://docs.jut.io/juttle-guide/#background_outputs) and therefore becomes a persistent job. token_manager: auth.TokenManager object app_url: optional argument used primarily for internal Jut testing """ headers = token_manager.get_access_token_headers() data_url = get_juttle_data_url(deployment_name, app_url=app_url, token_manager=token_manager) websocket = __wss_connect(data_url, token_manager) data = websocket.recv() channel_id_obj = json.loads(data) if is_debug_enabled(): debug('got channel response %s', json.dumps(channel_id_obj)) channel_id = channel_id_obj['channel_id'] juttle_job = { 'channel_id': channel_id, 'alias': program_name, 'program': juttle } response = requests.post('%s/api/v1/jobs' % data_url, data=json.dumps(juttle_job), headers=headers) if response.status_code != 200: yield { "error": True, "context": response.json() } return job_info = response.json() # yield job_info so the caller to this method can figure out which sinks # correlate to which flowgraphs yield job_info job_id = job_info['job']['id'] if is_debug_enabled(): debug('started job %s', json.dumps(job_info)) for data in connect_job(job_id, deployment_name, token_manager=token_manager, app_url=app_url, persist=persist, websocket=websocket, data_url=data_url): yield data
['def', 'run', '(', 'juttle', ',', 'deployment_name', ',', 'program_name', '=', 'None', ',', 'persist', '=', 'False', ',', 'token_manager', '=', 'None', ',', 'app_url', '=', 'defaults', '.', 'APP_URL', ')', ':', 'headers', '=', 'token_manager', '.', 'get_access_token_headers', '(', ')', 'data_url', '=', 'get_juttle_data_url', '(', 'deployment_name', ',', 'app_url', '=', 'app_url', ',', 'token_manager', '=', 'token_manager', ')', 'websocket', '=', '__wss_connect', '(', 'data_url', ',', 'token_manager', ')', 'data', '=', 'websocket', '.', 'recv', '(', ')', 'channel_id_obj', '=', 'json', '.', 'loads', '(', 'data', ')', 'if', 'is_debug_enabled', '(', ')', ':', 'debug', '(', "'got channel response %s'", ',', 'json', '.', 'dumps', '(', 'channel_id_obj', ')', ')', 'channel_id', '=', 'channel_id_obj', '[', "'channel_id'", ']', 'juttle_job', '=', '{', "'channel_id'", ':', 'channel_id', ',', "'alias'", ':', 'program_name', ',', "'program'", ':', 'juttle', '}', 'response', '=', 'requests', '.', 'post', '(', "'%s/api/v1/jobs'", '%', 'data_url', ',', 'data', '=', 'json', '.', 'dumps', '(', 'juttle_job', ')', ',', 'headers', '=', 'headers', ')', 'if', 'response', '.', 'status_code', '!=', '200', ':', 'yield', '{', '"error"', ':', 'True', ',', '"context"', ':', 'response', '.', 'json', '(', ')', '}', 'return', 'job_info', '=', 'response', '.', 'json', '(', ')', '# yield job_info so the caller to this method can figure out which sinks', '# correlate to which flowgraphs', 'yield', 'job_info', 'job_id', '=', 'job_info', '[', "'job'", ']', '[', "'id'", ']', 'if', 'is_debug_enabled', '(', ')', ':', 'debug', '(', "'started job %s'", ',', 'json', '.', 'dumps', '(', 'job_info', ')', ')', 'for', 'data', 'in', 'connect_job', '(', 'job_id', ',', 'deployment_name', ',', 'token_manager', '=', 'token_manager', ',', 'app_url', '=', 'app_url', ',', 'persist', '=', 'persist', ',', 'websocket', '=', 'websocket', ',', 'data_url', '=', 'data_url', ')', ':', 'yield', 'data']
run a juttle program through the juttle streaming API and return the various events that are part of running a Juttle program which include: * Initial job status details including information to associate multiple flowgraphs with their individual outputs (sinks): { "status": "ok", "job": { "channel_id": "56bde5f0", "_start_time": "2015-10-03T06:59:49.233Z", "alias": "jut-tools program 1443855588", "_ms_begin": 1443855589233, "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510", "timeout": 5, "id": "b973bce6" }, "now": "2015-10-03T06:59:49.230Z", "stats": ... "sinks": [ { "location": { "start": { "column": 17, "line": 1, "offset": 16 }, "end": { "column": 24, "line": 1, "offset": 23 }, "filename": "main" }, "name": "table", "channel": "sink237", "options": { "_jut_time_bounds": [] } }, ... as many sinks as there are flowgrpahs in your program ] } * Each set of points returned along with the indication of which sink they belong to: { "points": [ array of points ], "sink": sink_id } * Error event indicating where in your program the error occurred { "error": true, payload with "info" and "context" explaining exact error } * Warning event indicating where in your program the error occurred { "warning": true, payload with "info" and "context" explaining exact warning } * ... juttle: juttle program to execute deployment_name: the deployment name to execute the program on persist: if set to True then we won't wait for response data and will disconnect from the websocket leaving the program running in the background if it is uses a background output (http://docs.jut.io/juttle-guide/#background_outputs) and therefore becomes a persistent job. token_manager: auth.TokenManager object app_url: optional argument used primarily for internal Jut testing
['run', 'a', 'juttle', 'program', 'through', 'the', 'juttle', 'streaming', 'API', 'and', 'return', 'the', 'various', 'events', 'that', 'are', 'part', 'of', 'running', 'a', 'Juttle', 'program', 'which', 'include', ':']
train
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L263-L392
5,379
spyder-ide/spyder
spyder/otherplugins.py
_get_spyderplugins
def _get_spyderplugins(plugin_path, is_io, modnames, modlist):
    """Scan the directory `plugin_path` for plugin packages and loads them."""
    if not osp.isdir(plugin_path):
        return

    for name in os.listdir(plugin_path):
        # This is needed in order to register the spyder_io_hdf5 plugin.
        # See issue 4487
        # Is this a Spyder plugin?
        if not name.startswith(PLUGIN_PREFIX):
            continue

        # Ensure right type of plugin
        if is_io != name.startswith(IO_PREFIX):
            continue

        # Skip names that end in certain suffixes
        forbidden_suffixes = ['dist-info', 'egg.info', 'egg-info', 'egg-link',
                              'kernels']
        if any([name.endswith(s) for s in forbidden_suffixes]):
            continue

        # Import the plugin
        _import_plugin(name, plugin_path, modnames, modlist)
python
def _get_spyderplugins(plugin_path, is_io, modnames, modlist): """Scan the directory `plugin_path` for plugin packages and loads them.""" if not osp.isdir(plugin_path): return for name in os.listdir(plugin_path): # This is needed in order to register the spyder_io_hdf5 plugin. # See issue 4487 # Is this a Spyder plugin? if not name.startswith(PLUGIN_PREFIX): continue # Ensure right type of plugin if is_io != name.startswith(IO_PREFIX): continue # Skip names that end in certain suffixes forbidden_suffixes = ['dist-info', 'egg.info', 'egg-info', 'egg-link', 'kernels'] if any([name.endswith(s) for s in forbidden_suffixes]): continue # Import the plugin _import_plugin(name, plugin_path, modnames, modlist)
['def', '_get_spyderplugins', '(', 'plugin_path', ',', 'is_io', ',', 'modnames', ',', 'modlist', ')', ':', 'if', 'not', 'osp', '.', 'isdir', '(', 'plugin_path', ')', ':', 'return', 'for', 'name', 'in', 'os', '.', 'listdir', '(', 'plugin_path', ')', ':', '# This is needed in order to register the spyder_io_hdf5 plugin.\r', '# See issue 4487\r', '# Is this a Spyder plugin?\r', 'if', 'not', 'name', '.', 'startswith', '(', 'PLUGIN_PREFIX', ')', ':', 'continue', '# Ensure right type of plugin\r', 'if', 'is_io', '!=', 'name', '.', 'startswith', '(', 'IO_PREFIX', ')', ':', 'continue', '# Skip names that end in certain suffixes\r', 'forbidden_suffixes', '=', '[', "'dist-info'", ',', "'egg.info'", ',', "'egg-info'", ',', "'egg-link'", ',', "'kernels'", ']', 'if', 'any', '(', '[', 'name', '.', 'endswith', '(', 's', ')', 'for', 's', 'in', 'forbidden_suffixes', ']', ')', ':', 'continue', '# Import the plugin\r', '_import_plugin', '(', 'name', ',', 'plugin_path', ',', 'modnames', ',', 'modlist', ')']
Scan the directory `plugin_path` for plugin packages and loads them.
['Scan', 'the', 'directory', 'plugin_path', 'for', 'plugin', 'packages', 'and', 'loads', 'them', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/otherplugins.py#L46-L69
5,380
fermiPy/fermipy
fermipy/scripts/cluster_sources.py
make_reverse_dict
def make_reverse_dict(in_dict, warn=True):
    """ Build a reverse dictionary from a cluster dictionary

    Parameters
    ----------
    in_dict : dict(int:[int,])
       A dictionary of clusters.  Each cluster is a source index and
       the list of other source in the cluster.

    Returns
    -------
    out_dict : dict(int:int)
       A single valued dictionary pointing from source index to
       cluster key for each source in a cluster.  Note that the key
       does not point to itself.
    """
    out_dict = {}
    for k, v in in_dict.items():
        for vv in v:
            if vv in out_dict:
                if warn:
                    print("Dictionary collision %i" % vv)
            out_dict[vv] = k
    return out_dict
python
def make_reverse_dict(in_dict, warn=True): """ Build a reverse dictionary from a cluster dictionary Parameters ---------- in_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster. Returns ------- out_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself. """ out_dict = {} for k, v in in_dict.items(): for vv in v: if vv in out_dict: if warn: print("Dictionary collision %i" % vv) out_dict[vv] = k return out_dict
['def', 'make_reverse_dict', '(', 'in_dict', ',', 'warn', '=', 'True', ')', ':', 'out_dict', '=', '{', '}', 'for', 'k', ',', 'v', 'in', 'in_dict', '.', 'items', '(', ')', ':', 'for', 'vv', 'in', 'v', ':', 'if', 'vv', 'in', 'out_dict', ':', 'if', 'warn', ':', 'print', '(', '"Dictionary collision %i"', '%', 'vv', ')', 'out_dict', '[', 'vv', ']', '=', 'k', 'return', 'out_dict']
Build a reverse dictionary from a cluster dictionary Parameters ---------- in_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster. Returns ------- out_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself.
['Build', 'a', 'reverse', 'dictionary', 'from', 'a', 'cluster', 'dictionary']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/cluster_sources.py#L420-L443
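A quick worked example of the reverse-dictionary behaviour documented above; the cluster dictionary here is invented for illustration and the loop simply restates the record's logic.

clusters = {0: [1, 2], 3: [4]}

out = {}
for key, members in clusters.items():
    for member in members:
        if member in out:
            print("Dictionary collision %i" % member)
        out[member] = key

print(out)  # {1: 0, 2: 0, 4: 3} -- each member maps to its cluster key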
5,381
openstack/horizon
openstack_dashboard/api/keystone.py
remove_domain_user_role
def remove_domain_user_role(request, user, role, domain=None):
    """Removes a given single role for a user from a domain."""
    manager = keystoneclient(request, admin=True).roles
    return manager.revoke(role, user=user, domain=domain)
python
def remove_domain_user_role(request, user, role, domain=None): """Removes a given single role for a user from a domain.""" manager = keystoneclient(request, admin=True).roles return manager.revoke(role, user=user, domain=domain)
['def', 'remove_domain_user_role', '(', 'request', ',', 'user', ',', 'role', ',', 'domain', '=', 'None', ')', ':', 'manager', '=', 'keystoneclient', '(', 'request', ',', 'admin', '=', 'True', ')', '.', 'roles', 'return', 'manager', '.', 'revoke', '(', 'role', ',', 'user', '=', 'user', ',', 'domain', '=', 'domain', ')']
Removes a given single role for a user from a domain.
['Removes', 'a', 'given', 'single', 'role', 'for', 'a', 'user', 'from', 'a', 'domain', '.']
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/keystone.py#L798-L801
5,382
projectshift/shift-schema
shiftschema/property.py
SimpleProperty.validate
def validate(self, value=None, model=None, context=None):
    """
    Sequentially apply each validator to value and collect errors.

    :param value: a value to validate
    :param model: parent entity
    :param context: validation context, usually parent entity
    :return: list of errors (if any)
    """
    errors = []
    for validator in self.validators:
        if value is None and not isinstance(validator, Required):
            continue

        error = validator.run(
            value=value,
            model=model,
            context=context if self.use_context else None
        )
        if error:
            errors.append(error)

    return errors
python
def validate(self, value=None, model=None, context=None): """ Sequentially apply each validator to value and collect errors. :param value: a value to validate :param model: parent entity :param context: validation context, usually parent entity :return: list of errors (if any) """ errors = [] for validator in self.validators: if value is None and not isinstance(validator, Required): continue error = validator.run( value=value, model=model, context=context if self.use_context else None ) if error: errors.append(error) return errors
['def', 'validate', '(', 'self', ',', 'value', '=', 'None', ',', 'model', '=', 'None', ',', 'context', '=', 'None', ')', ':', 'errors', '=', '[', ']', 'for', 'validator', 'in', 'self', '.', 'validators', ':', 'if', 'value', 'is', 'None', 'and', 'not', 'isinstance', '(', 'validator', ',', 'Required', ')', ':', 'continue', 'error', '=', 'validator', '.', 'run', '(', 'value', '=', 'value', ',', 'model', '=', 'model', ',', 'context', '=', 'context', 'if', 'self', '.', 'use_context', 'else', 'None', ')', 'if', 'error', ':', 'errors', '.', 'append', '(', 'error', ')', 'return', 'errors']
Sequentially apply each validator to value and collect errors. :param value: a value to validate :param model: parent entity :param context: validation context, usually parent entity :return: list of errors (if any)
['Sequentially', 'apply', 'each', 'validator', 'to', 'value', 'and', 'collect', 'errors', '.']
train
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/property.py#L75-L97
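The validator chain above can be sketched standalone: each validator runs in turn, None values skip everything except a required check, and errors are collected into a list. The Required and MinLength classes below are illustrative stand-ins, not the shiftschema API.

class Required:
    def run(self, value, model=None, context=None):
        return None if value not in (None, "") else "required"

class MinLength:
    def __init__(self, length):
        self.length = length
    def run(self, value, model=None, context=None):
        return None if len(value) >= self.length else "too short"

def run_validators(value, validators):
    errors = []
    for validator in validators:
        if value is None and not isinstance(validator, Required):
            continue  # only the required check sees missing values
        error = validator.run(value)
        if error:
            errors.append(error)
    return errors

print(run_validators("ab", [Required(), MinLength(3)]))  # ['too short']
print(run_validators(None, [Required(), MinLength(3)]))  # ['required']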
5,383
ternaris/marv
docs/tutorial/code/marv_tutorial/__init__.py
gallery_section
def gallery_section(images, title):
    """Create detail section with gallery.

    Args:
        title (str): Title to be displayed for detail section.
        images: stream of marv image files

    Returns
        One detail section.
    """
    # pull all images
    imgs = []
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        imgs.append({'src': img.relpath})
    if not imgs:
        return

    # create gallery widget and section containing it
    widget = {'title': images.title, 'gallery': {'images': imgs}}
    section = {'title': title, 'widgets': [widget]}
    yield marv.push(section)
python
def gallery_section(images, title): """Create detail section with gallery. Args: title (str): Title to be displayed for detail section. images: stream of marv image files Returns One detail section. """ # pull all images imgs = [] while True: img = yield marv.pull(images) if img is None: break imgs.append({'src': img.relpath}) if not imgs: return # create gallery widget and section containing it widget = {'title': images.title, 'gallery': {'images': imgs}} section = {'title': title, 'widgets': [widget]} yield marv.push(section)
['def', 'gallery_section', '(', 'images', ',', 'title', ')', ':', '# pull all images', 'imgs', '=', '[', ']', 'while', 'True', ':', 'img', '=', 'yield', 'marv', '.', 'pull', '(', 'images', ')', 'if', 'img', 'is', 'None', ':', 'break', 'imgs', '.', 'append', '(', '{', "'src'", ':', 'img', '.', 'relpath', '}', ')', 'if', 'not', 'imgs', ':', 'return', '# create gallery widget and section containing it', 'widget', '=', '{', "'title'", ':', 'images', '.', 'title', ',', "'gallery'", ':', '{', "'images'", ':', 'imgs', '}', '}', 'section', '=', '{', "'title'", ':', 'title', ',', "'widgets'", ':', '[', 'widget', ']', '}', 'yield', 'marv', '.', 'push', '(', 'section', ')']
Create detail section with gallery. Args: title (str): Title to be displayed for detail section. images: stream of marv image files Returns One detail section.
['Create', 'detail', 'section', 'with', 'gallery', '.']
train
https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/docs/tutorial/code/marv_tutorial/__init__.py#L124-L147
5,384
PythonCharmers/python-future
src/future/backports/urllib/request.py
URLopener.open_unknown_proxy
def open_unknown_proxy(self, proxy, fullurl, data=None):
    """Overridable interface to open unknown URL type."""
    type, url = splittype(fullurl)
    raise IOError('url error', 'invalid proxy for %s' % type, proxy)
python
def open_unknown_proxy(self, proxy, fullurl, data=None): """Overridable interface to open unknown URL type.""" type, url = splittype(fullurl) raise IOError('url error', 'invalid proxy for %s' % type, proxy)
['def', 'open_unknown_proxy', '(', 'self', ',', 'proxy', ',', 'fullurl', ',', 'data', '=', 'None', ')', ':', 'type', ',', 'url', '=', 'splittype', '(', 'fullurl', ')', 'raise', 'IOError', '(', "'url error'", ',', "'invalid proxy for %s'", '%', 'type', ',', 'proxy', ')']
Overridable interface to open unknown URL type.
['Overridable', 'interface', 'to', 'open', 'unknown', 'URL', 'type', '.']
train
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L1710-L1713
5,385
gem/oq-engine
openquake/hmtk/plotting/seismicity/completeness/cumulative_rate_analysis.py
SimpleCumulativeRate._get_magnitudes_from_spacing
def _get_magnitudes_from_spacing(self, magnitudes, delta_m):
    '''If a single magnitude spacing is input then create the bins

    :param numpy.ndarray magnitudes:
        Vector of earthquake magnitudes

    :param float delta_m:
        Magnitude bin width

    :returns: Vector of magnitude bin edges (numpy.ndarray)
    '''
    min_mag = np.min(magnitudes)
    max_mag = np.max(magnitudes)
    if (max_mag - min_mag) < delta_m:
        raise ValueError('Bin width greater than magnitude range!')

    mag_bins = np.arange(np.floor(min_mag), np.ceil(max_mag), delta_m)
    # Check to see if there are magnitudes in lower and upper bins
    is_mag = np.logical_and(mag_bins - max_mag < delta_m,
                            min_mag - mag_bins < delta_m)
    mag_bins = mag_bins[is_mag]
    return mag_bins
python
def _get_magnitudes_from_spacing(self, magnitudes, delta_m): '''If a single magnitude spacing is input then create the bins :param numpy.ndarray magnitudes: Vector of earthquake magnitudes :param float delta_m: Magnitude bin width :returns: Vector of magnitude bin edges (numpy.ndarray) ''' min_mag = np.min(magnitudes) max_mag = np.max(magnitudes) if (max_mag - min_mag) < delta_m: raise ValueError('Bin width greater than magnitude range!') mag_bins = np.arange(np.floor(min_mag), np.ceil(max_mag), delta_m) # Check to see if there are magnitudes in lower and upper bins is_mag = np.logical_and(mag_bins - max_mag < delta_m, min_mag - mag_bins < delta_m) mag_bins = mag_bins[is_mag] return mag_bins
['def', '_get_magnitudes_from_spacing', '(', 'self', ',', 'magnitudes', ',', 'delta_m', ')', ':', 'min_mag', '=', 'np', '.', 'min', '(', 'magnitudes', ')', 'max_mag', '=', 'np', '.', 'max', '(', 'magnitudes', ')', 'if', '(', 'max_mag', '-', 'min_mag', ')', '<', 'delta_m', ':', 'raise', 'ValueError', '(', "'Bin width greater than magnitude range!'", ')', 'mag_bins', '=', 'np', '.', 'arange', '(', 'np', '.', 'floor', '(', 'min_mag', ')', ',', 'np', '.', 'ceil', '(', 'max_mag', ')', ',', 'delta_m', ')', '# Check to see if there are magnitudes in lower and upper bins', 'is_mag', '=', 'np', '.', 'logical_and', '(', 'mag_bins', '-', 'max_mag', '<', 'delta_m', ',', 'min_mag', '-', 'mag_bins', '<', 'delta_m', ')', 'mag_bins', '=', 'mag_bins', '[', 'is_mag', ']', 'return', 'mag_bins']
If a single magnitude spacing is input then create the bins :param numpy.ndarray magnitudes: Vector of earthquake magnitudes :param float delta_m: Magnitude bin width :returns: Vector of magnitude bin edges (numpy.ndarray)
['If', 'a', 'single', 'magnitude', 'spacing', 'is', 'input', 'then', 'create', 'the', 'bins']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/plotting/seismicity/completeness/cumulative_rate_analysis.py#L132-L152
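A self-contained numpy sketch of the same binning logic as the record above, with made-up magnitudes and bin width:

import numpy as np

magnitudes = np.array([4.2, 4.7, 5.1, 5.8, 6.3])
delta_m = 0.5

mag_bins = np.arange(np.floor(magnitudes.min()), np.ceil(magnitudes.max()), delta_m)
# Keep only edges within one bin width of the observed magnitude range,
# mirroring the np.logical_and filter in the record above.
keep = np.logical_and(mag_bins - magnitudes.max() < delta_m,
                      magnitudes.min() - mag_bins < delta_m)
print(mag_bins[keep])  # [4.  4.5 5.  5.5 6.  6.5]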
5,386
Numigi/gitoo
src/core.py
force_move
def force_move(source, destination):
    """ Force the move of the source inside the destination even if the destination has already
    a folder with the name inside. In the case, the folder will be replaced.

    :param string source: path of the source to move.
    :param string destination: path of the folder to move the source to.
    """
    if not os.path.exists(destination):
        raise RuntimeError(
            'The code could not be moved to {destination} '
            'because the folder does not exist'.format(destination=destination))

    destination_folder = os.path.join(destination, os.path.split(source)[-1])
    if os.path.exists(destination_folder):
        shutil.rmtree(destination_folder)

    shutil.move(source, destination)
python
def force_move(source, destination): """ Force the move of the source inside the destination even if the destination has already a folder with the name inside. In the case, the folder will be replaced. :param string source: path of the source to move. :param string destination: path of the folder to move the source to. """ if not os.path.exists(destination): raise RuntimeError( 'The code could not be moved to {destination} ' 'because the folder does not exist'.format(destination=destination)) destination_folder = os.path.join(destination, os.path.split(source)[-1]) if os.path.exists(destination_folder): shutil.rmtree(destination_folder) shutil.move(source, destination)
['def', 'force_move', '(', 'source', ',', 'destination', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'destination', ')', ':', 'raise', 'RuntimeError', '(', "'The code could not be moved to {destination} '", "'because the folder does not exist'", '.', 'format', '(', 'destination', '=', 'destination', ')', ')', 'destination_folder', '=', 'os', '.', 'path', '.', 'join', '(', 'destination', ',', 'os', '.', 'path', '.', 'split', '(', 'source', ')', '[', '-', '1', ']', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'destination_folder', ')', ':', 'shutil', '.', 'rmtree', '(', 'destination_folder', ')', 'shutil', '.', 'move', '(', 'source', ',', 'destination', ')']
Force the move of the source inside the destination even if the destination has already a folder with the name inside. In the case, the folder will be replaced. :param string source: path of the source to move. :param string destination: path of the folder to move the source to.
['Force', 'the', 'move', 'of', 'the', 'source', 'inside', 'the', 'destination', 'even', 'if', 'the', 'destination', 'has', 'already', 'a', 'folder', 'with', 'the', 'name', 'inside', '.', 'In', 'the', 'case', 'the', 'folder', 'will', 'be', 'replaced', '.']
train
https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L38-L54
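A runnable standard-library sketch of the replace-on-move behaviour described above, using a temporary directory so it is safe to run anywhere; the folder names are invented for illustration.

import os
import shutil
import tempfile

work = tempfile.mkdtemp()
source = os.path.join(work, "addon")
destination = os.path.join(work, "addons")
os.makedirs(source)
os.makedirs(os.path.join(destination, "addon"))  # pre-existing folder with the same name

# Same idea as force_move above: drop any existing folder, then move.
target = os.path.join(destination, os.path.basename(source))
if os.path.exists(target):
    shutil.rmtree(target)
shutil.move(source, destination)

print(os.listdir(destination))  # ['addon']
shutil.rmtree(work)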
5,387
veripress/veripress
veripress/model/toc.py
HtmlTocParser.toc_html
def toc_html(self, depth=6, lowest_level=6):
    """
    Get TOC of currently fed HTML string in form of HTML string.

    :param depth: the depth of TOC
    :param lowest_level: the allowed lowest level of header tag
    :return: an HTML string
    """
    toc = self.toc(depth=depth, lowest_level=lowest_level)
    if not toc:
        return ''

    def map_toc_list(toc_list):
        result = ''
        if toc_list:
            result += '<ul>\n'
            result += ''.join(
                map(lambda x: '<li>'
                              '<a href="#{}">{}</a>{}'
                              '</li>\n'.format(
                                  x['id'], x['inner_html'],
                                  map_toc_list(x['children'])),
                    toc_list)
            )
            result += '</ul>'
        return result

    return map_toc_list(toc)
python
def toc_html(self, depth=6, lowest_level=6): """ Get TOC of currently fed HTML string in form of HTML string. :param depth: the depth of TOC :param lowest_level: the allowed lowest level of header tag :return: an HTML string """ toc = self.toc(depth=depth, lowest_level=lowest_level) if not toc: return '' def map_toc_list(toc_list): result = '' if toc_list: result += '<ul>\n' result += ''.join( map(lambda x: '<li>' '<a href="#{}">{}</a>{}' '</li>\n'.format( x['id'], x['inner_html'], map_toc_list(x['children'])), toc_list) ) result += '</ul>' return result return map_toc_list(toc)
['def', 'toc_html', '(', 'self', ',', 'depth', '=', '6', ',', 'lowest_level', '=', '6', ')', ':', 'toc', '=', 'self', '.', 'toc', '(', 'depth', '=', 'depth', ',', 'lowest_level', '=', 'lowest_level', ')', 'if', 'not', 'toc', ':', 'return', "''", 'def', 'map_toc_list', '(', 'toc_list', ')', ':', 'result', '=', "''", 'if', 'toc_list', ':', 'result', '+=', "'<ul>\\n'", 'result', '+=', "''", '.', 'join', '(', 'map', '(', 'lambda', 'x', ':', "'<li>'", '\'<a href="#{}">{}</a>{}\'', "'</li>\\n'", '.', 'format', '(', 'x', '[', "'id'", ']', ',', 'x', '[', "'inner_html'", ']', ',', 'map_toc_list', '(', 'x', '[', "'children'", ']', ')', ')', ',', 'toc_list', ')', ')', 'result', '+=', "'</ul>'", 'return', 'result', 'return', 'map_toc_list', '(', 'toc', ')']
Get TOC of currently fed HTML string in form of HTML string.

:param depth: the depth of TOC
:param lowest_level: the allowed lowest level of header tag
:return: an HTML string
['Get', 'TOC', 'of', 'currently', 'fed', 'HTML', 'string', 'in', 'form', 'of', 'HTML', 'string', '.']
train
https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/model/toc.py#L92-L119
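A short usage sketch for the record above. It assumes HtmlTocParser is importable from veripress.model.toc (the record's path) and exposes the standard HTMLParser-style feed() method implied by "currently fed HTML string"; the HTML input is made up.

from veripress.model.toc import HtmlTocParser

parser = HtmlTocParser()
parser.feed('<h1 id="intro">Intro</h1><h2 id="setup">Setup</h2>')  # hypothetical input
print(parser.toc_html(depth=2))  # nested <ul>/<li> markup with anchors such as #intro and #setup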
5,388
andymccurdy/redis-py
redis/connection.py
PythonParser.on_connect
def on_connect(self, connection):
    "Called when the socket connects"
    self._sock = connection._sock
    self._buffer = SocketBuffer(self._sock, self.socket_read_size)
    self.encoder = connection.encoder
python
def on_connect(self, connection):
    "Called when the socket connects"
    self._sock = connection._sock
    self._buffer = SocketBuffer(self._sock, self.socket_read_size)
    self.encoder = connection.encoder
['def', 'on_connect', '(', 'self', ',', 'connection', ')', ':', 'self', '.', '_sock', '=', 'connection', '.', '_sock', 'self', '.', '_buffer', '=', 'SocketBuffer', '(', 'self', '.', '_sock', ',', 'self', '.', 'socket_read_size', ')', 'self', '.', 'encoder', '=', 'connection', '.', 'encoder']
Called when the socket connects
['Called', 'when', 'the', 'socket', 'connects']
train
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/connection.py#L265-L269
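A sketch of the call pattern only: in redis-py this parser hook is not called directly, it is invoked by the Connection object after the TCP socket is established. Host and port are placeholders and a reachable Redis server is assumed.

from redis.connection import Connection

conn = Connection(host='localhost', port=6379)
conn.connect()                    # internally invokes self._parser.on_connect(self)
conn.send_command('PING')
print(conn.read_response())       # b'PONG' against a reachable Redis server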
5,389
Kozea/cairocffi
cairocffi/surfaces.py
SVGSurface.set_document_unit
def set_document_unit(self, unit):
    """Use specified unit for width and height of generated SVG file.

    See ``SVG_UNIT_*`` enumerated values for a list of available unit
    values that can be used here.

    This function can be called at any time before generating the SVG file.

    However to minimize the risk of ambiguities it's recommended to call
    it before any drawing operations have been performed on the given
    surface, to make it clearer what the unit used in the drawing
    operations is.

    The simplest way to do this is to call this function immediately
    after creating the SVG surface.

    Note if this function is never called, the default unit for SVG
    documents generated by cairo will be "pt". This is for historical
    reasons.

    :param unit: SVG unit.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    self._check_status()
python
def set_document_unit(self, unit):
    """Use specified unit for width and height of generated SVG file.

    See ``SVG_UNIT_*`` enumerated values for a list of available unit
    values that can be used here.

    This function can be called at any time before generating the SVG file.

    However to minimize the risk of ambiguities it's recommended to call
    it before any drawing operations have been performed on the given
    surface, to make it clearer what the unit used in the drawing
    operations is.

    The simplest way to do this is to call this function immediately
    after creating the SVG surface.

    Note if this function is never called, the default unit for SVG
    documents generated by cairo will be "pt". This is for historical
    reasons.

    :param unit: SVG unit.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    self._check_status()
['def', 'set_document_unit', '(', 'self', ',', 'unit', ')', ':', 'cairo', '.', 'cairo_svg_surface_set_document_unit', '(', 'self', '.', '_pointer', ',', 'unit', ')', 'self', '.', '_check_status', '(', ')']
Use specified unit for width and height of generated SVG file.

See ``SVG_UNIT_*`` enumerated values for a list of available unit values that can be used here.

This function can be called at any time before generating the SVG file.

However to minimize the risk of ambiguities it's recommended to call it before any drawing operations have been performed on the given surface, to make it clearer what the unit used in the drawing operations is.

The simplest way to do this is to call this function immediately after creating the SVG surface.

Note if this function is never called, the default unit for SVG documents generated by cairo will be "pt". This is for historical reasons.

:param unit: SVG unit.

*New in cairo 1.16.*

*New in cairocffi 0.9.*
['Use', 'specified', 'unit', 'for', 'width', 'and', 'height', 'of', 'generated', 'SVG', 'file', '.']
train
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L1309-L1336
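A minimal sketch of the usage described in the docstring above. SVG_UNIT_MM is assumed to be one of the ``SVG_UNIT_*`` values the docstring refers to, and cairo >= 1.16 / cairocffi >= 0.9 is required; the output filename is a placeholder.

import cairocffi

surface = cairocffi.SVGSurface('out.svg', 100, 100)    # width/height interpreted in the chosen unit
surface.set_document_unit(cairocffi.SVG_UNIT_MM)        # call right after creation, before drawing
context = cairocffi.Context(surface)
context.rectangle(10, 10, 50, 50)
context.fill()
surface.finish()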
5,390
inasafe/inasafe
safe/gui/tools/batch/batch_dialog.py
BatchDialog.save_state
def save_state(self): """Save current state of GUI to configuration file.""" set_setting('lastSourceDir', self.source_directory.text()) set_setting('lastOutputDir', self.output_directory.text()) set_setting( 'useDefaultOutputDir', self.scenario_directory_radio.isChecked())
python
def save_state(self): """Save current state of GUI to configuration file.""" set_setting('lastSourceDir', self.source_directory.text()) set_setting('lastOutputDir', self.output_directory.text()) set_setting( 'useDefaultOutputDir', self.scenario_directory_radio.isChecked())
['def', 'save_state', '(', 'self', ')', ':', 'set_setting', '(', "'lastSourceDir'", ',', 'self', '.', 'source_directory', '.', 'text', '(', ')', ')', 'set_setting', '(', "'lastOutputDir'", ',', 'self', '.', 'output_directory', '.', 'text', '(', ')', ')', 'set_setting', '(', "'useDefaultOutputDir'", ',', 'self', '.', 'scenario_directory_radio', '.', 'isChecked', '(', ')', ')']
Save current state of GUI to configuration file.
['Save', 'current', 'state', 'of', 'GUI', 'to', 'configuration', 'file', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L187-L192
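An illustration of the save pattern only, outside of QGIS: set_setting here is a JSON-backed stand-in for InaSAFE's real helper, and the saved values are plain strings instead of live Qt widget state.

import json

def set_setting(key, value, path='batch_dialog_settings.json'):
    # Stand-in for InaSAFE's set_setting: persist a key/value pair to a JSON file.
    try:
        with open(path) as handle:
            settings = json.load(handle)
    except FileNotFoundError:
        settings = {}
    settings[key] = value
    with open(path, 'w') as handle:
        json.dump(settings, handle)

set_setting('lastSourceDir', '/data/scenarios')      # hypothetical values
set_setting('lastOutputDir', '/data/reports')
set_setting('useDefaultOutputDir', True)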
5,391
kedpter/secret_miner
pjutils.py
Sphinx.gen_code_api
def gen_code_api(self):
    """TODO: Docstring for gen_code_api."""
    # edit config file
    conf_editor = Editor(self.conf_fpath)

    # insert code path for searching
    conf_editor.editline_with_regex(r'^# import os', 'import os')
    conf_editor.editline_with_regex(r'^# import sys', 'import sys')
    conf_editor.editline_with_regex(
        r'^# sys\.path\.insert',
        'sys.path.insert(0, "{}")'.format(self.code_fdpath))
    conf_editor.editline_with_regex(
        r"""html_theme = 'alabaster'""",
        'html_theme = \'default\''.format(self.code_fdpath))

    conf_editor.finish_writing()

    # sphinx-apidoc to generate rst from source code
    # force regenerate
    subprocess.call(self._sphinx_apidoc_cmd)
    pass
python
def gen_code_api(self):
    """TODO: Docstring for gen_code_api."""
    # edit config file
    conf_editor = Editor(self.conf_fpath)

    # insert code path for searching
    conf_editor.editline_with_regex(r'^# import os', 'import os')
    conf_editor.editline_with_regex(r'^# import sys', 'import sys')
    conf_editor.editline_with_regex(
        r'^# sys\.path\.insert',
        'sys.path.insert(0, "{}")'.format(self.code_fdpath))
    conf_editor.editline_with_regex(
        r"""html_theme = 'alabaster'""",
        'html_theme = \'default\''.format(self.code_fdpath))

    conf_editor.finish_writing()

    # sphinx-apidoc to generate rst from source code
    # force regenerate
    subprocess.call(self._sphinx_apidoc_cmd)
    pass
['def', 'gen_code_api', '(', 'self', ')', ':', '# edit config file', 'conf_editor', '=', 'Editor', '(', 'self', '.', 'conf_fpath', ')', '# insert code path for searching', 'conf_editor', '.', 'editline_with_regex', '(', "r'^# import os'", ',', "'import os'", ')', 'conf_editor', '.', 'editline_with_regex', '(', "r'^# import sys'", ',', "'import sys'", ')', 'conf_editor', '.', 'editline_with_regex', '(', "r'^# sys\\.path\\.insert'", ',', '\'sys.path.insert(0, "{}")\'', '.', 'format', '(', 'self', '.', 'code_fdpath', ')', ')', 'conf_editor', '.', 'editline_with_regex', '(', 'r"""html_theme = \'alabaster\'"""', ',', "'html_theme = \\'default\\''", '.', 'format', '(', 'self', '.', 'code_fdpath', ')', ')', 'conf_editor', '.', 'finish_writing', '(', ')', '# sphinx-apidoc to generate rst from source code', '# force regenerate', 'subprocess', '.', 'call', '(', 'self', '.', '_sphinx_apidoc_cmd', ')', 'pass']
TODO: Docstring for gen_code_api.
['TODO', ':', 'Docstring', 'for', 'gen_code_api', '.']
train
https://github.com/kedpter/secret_miner/blob/3b4ebe58e11fb688d7e8928ebaa2871fc43717e4/pjutils.py#L257-L281
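The method above ultimately shells out to sphinx-apidoc; a sketch of an equivalent direct invocation is below. The docs/source and src paths are placeholders, and the -f flag mirrors the "force regenerate" comment in the record.

import subprocess

subprocess.call([
    'sphinx-apidoc',
    '-f',                   # force regeneration of existing .rst stubs
    '-o', 'docs/source',    # output directory for the generated .rst files
    'src',                  # package/source directory to document
])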
5,392
DataKitchen/DKCloudCommand
DKCloudCommand/cli/__main__.py
delete_orderrun
def delete_orderrun(backend, orderrun_id):
    """
    Delete the orderrun specified by the argument.
    """
    click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')
    check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip()))
python
def delete_orderrun(backend, orderrun_id):
    """
    Delete the orderrun specified by the argument.
    """
    click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')
    check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip()))
['def', 'delete_orderrun', '(', 'backend', ',', 'orderrun_id', ')', ':', 'click', '.', 'secho', '(', "'%s - Deleting orderrun %s'", '%', '(', 'get_datetime', '(', ')', ',', 'orderrun_id', ')', ',', 'fg', '=', "'green'", ')', 'check_and_print', '(', 'DKCloudCommandRunner', '.', 'delete_orderrun', '(', 'backend', '.', 'dki', ',', 'orderrun_id', '.', 'strip', '(', ')', ')', ')']
Delete the orderrun specified by the argument.
['Delete', 'the', 'orderrun', 'specified', 'by', 'the', 'argument', '.']
train
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/cli/__main__.py#L822-L827
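A tiny standalone sketch of the output pattern used above (a timestamped, colored status line via click.secho). The get_datetime helper here is a stand-in for the CLI's own, and the order-run id is a placeholder.

import datetime
import click

def get_datetime():
    # Stand-in for the CLI's get_datetime helper.
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

orderrun_id = 'ct1-0123456789'  # placeholder id
click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')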
5,393
ensime/ensime-vim
ensime_shared/protocol.py
ProtocolHandlerV1.handle_string_response
def handle_string_response(self, call_id, payload):
    """Handler for response `StringResponse`.

    This is the response for the following requests:
      1. `DocUriAtPointReq` or `DocUriForSymbolReq`
      2. `DebugToStringReq`
    """
    self.log.debug('handle_string_response: in [typehint: %s, call ID: %s]',
                   payload['typehint'], call_id)

    # :EnDocBrowse or :EnDocUri
    url = payload['text']
    if not url.startswith('http'):
        port = self.ensime.http_port()
        url = gconfig['localhost'].format(port, url)

    options = self.call_options.get(call_id)
    if options and options.get('browse'):
        self._browse_doc(url)
        del self.call_options[call_id]
    else:
        # TODO: make this return value of a Vim function synchronously, how?
        self.log.debug('EnDocUri %s', url)
        return url
python
def handle_string_response(self, call_id, payload):
    """Handler for response `StringResponse`.

    This is the response for the following requests:
      1. `DocUriAtPointReq` or `DocUriForSymbolReq`
      2. `DebugToStringReq`
    """
    self.log.debug('handle_string_response: in [typehint: %s, call ID: %s]',
                   payload['typehint'], call_id)

    # :EnDocBrowse or :EnDocUri
    url = payload['text']
    if not url.startswith('http'):
        port = self.ensime.http_port()
        url = gconfig['localhost'].format(port, url)

    options = self.call_options.get(call_id)
    if options and options.get('browse'):
        self._browse_doc(url)
        del self.call_options[call_id]
    else:
        # TODO: make this return value of a Vim function synchronously, how?
        self.log.debug('EnDocUri %s', url)
        return url
['def', 'handle_string_response', '(', 'self', ',', 'call_id', ',', 'payload', ')', ':', 'self', '.', 'log', '.', 'debug', '(', "'handle_string_response: in [typehint: %s, call ID: %s]'", ',', 'payload', '[', "'typehint'", ']', ',', 'call_id', ')', '# :EnDocBrowse or :EnDocUri', 'url', '=', 'payload', '[', "'text'", ']', 'if', 'not', 'url', '.', 'startswith', '(', "'http'", ')', ':', 'port', '=', 'self', '.', 'ensime', '.', 'http_port', '(', ')', 'url', '=', 'gconfig', '[', "'localhost'", ']', '.', 'format', '(', 'port', ',', 'url', ')', 'options', '=', 'self', '.', 'call_options', '.', 'get', '(', 'call_id', ')', 'if', 'options', 'and', 'options', '.', 'get', '(', "'browse'", ')', ':', 'self', '.', '_browse_doc', '(', 'url', ')', 'del', 'self', '.', 'call_options', '[', 'call_id', ']', 'else', ':', '# TODO: make this return value of a Vim function synchronously, how?', 'self', '.', 'log', '.', 'debug', '(', "'EnDocUri %s'", ',', 'url', ')', 'return', 'url']
Handler for response `StringResponse`.

This is the response for the following requests:
  1. `DocUriAtPointReq` or `DocUriForSymbolReq`
  2. `DebugToStringReq`
['Handler', 'for', 'response', 'StringResponse', '.']
train
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/protocol.py#L203-L226
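A minimal sketch of only the URL-handling step in the handler above: relative doc paths become a local docs URL on the ENSIME HTTP port, absolute http(s) URLs pass through. The port value and the template string are assumptions standing in for gconfig['localhost'].

port = 34567                                       # hypothetical ENSIME HTTP port
localhost_template = 'http://localhost:{}/{}'      # stand-in for gconfig['localhost']

def to_doc_url(text):
    # Mirror the handler's branch on payload['text'].
    return text if text.startswith('http') else localhost_template.format(port, text)

print(to_doc_url('docs/scala/collection/immutable/List.html'))
print(to_doc_url('https://docs.oracle.com/javase/8/docs/api/'))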
5,394
yvesalexandre/bandicoot
bandicoot/io.py
_parse_record
def _parse_record(data, duration_format='seconds'):
    """
    Parse a raw data dictionary and return a Record object.
    """

    def _map_duration(s):
        if s == '':
            return None
        elif duration_format.lower() == 'seconds':
            return int(s)
        else:
            t = time.strptime(s, duration_format)
            return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec

    def _map_position(data):
        antenna = Position()
        if 'antenna_id' in data and data['antenna_id']:
            antenna.antenna = data['antenna_id']

        if 'place_id' in data:
            raise NameError("Use field name 'antenna_id' in input files. "
                            "'place_id' is deprecated.")

        if 'latitude' in data and 'longitude' in data:
            latitude = data['latitude']
            longitude = data['longitude']
            # latitude and longitude should not be empty strings.
            if latitude and longitude:
                antenna.location = float(latitude), float(longitude)

        return antenna

    return Record(interaction=data['interaction'] if data['interaction'] else None,
                  direction=data['direction'],
                  correspondent_id=data['correspondent_id'],
                  datetime=_tryto(
                      lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
                      data['datetime']),
                  call_duration=_tryto(_map_duration, data['call_duration']),
                  position=_tryto(_map_position, data))
python
def _parse_record(data, duration_format='seconds'):
    """
    Parse a raw data dictionary and return a Record object.
    """

    def _map_duration(s):
        if s == '':
            return None
        elif duration_format.lower() == 'seconds':
            return int(s)
        else:
            t = time.strptime(s, duration_format)
            return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec

    def _map_position(data):
        antenna = Position()
        if 'antenna_id' in data and data['antenna_id']:
            antenna.antenna = data['antenna_id']

        if 'place_id' in data:
            raise NameError("Use field name 'antenna_id' in input files. "
                            "'place_id' is deprecated.")

        if 'latitude' in data and 'longitude' in data:
            latitude = data['latitude']
            longitude = data['longitude']
            # latitude and longitude should not be empty strings.
            if latitude and longitude:
                antenna.location = float(latitude), float(longitude)

        return antenna

    return Record(interaction=data['interaction'] if data['interaction'] else None,
                  direction=data['direction'],
                  correspondent_id=data['correspondent_id'],
                  datetime=_tryto(
                      lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
                      data['datetime']),
                  call_duration=_tryto(_map_duration, data['call_duration']),
                  position=_tryto(_map_position, data))
['def', '_parse_record', '(', 'data', ',', 'duration_format', '=', "'seconds'", ')', ':', 'def', '_map_duration', '(', 's', ')', ':', 'if', 's', '==', "''", ':', 'return', 'None', 'elif', 'duration_format', '.', 'lower', '(', ')', '==', "'seconds'", ':', 'return', 'int', '(', 's', ')', 'else', ':', 't', '=', 'time', '.', 'strptime', '(', 's', ',', 'duration_format', ')', 'return', '3600', '*', 't', '.', 'tm_hour', '+', '60', '*', 't', '.', 'tm_min', '+', 't', '.', 'tm_sec', 'def', '_map_position', '(', 'data', ')', ':', 'antenna', '=', 'Position', '(', ')', 'if', "'antenna_id'", 'in', 'data', 'and', 'data', '[', "'antenna_id'", ']', ':', 'antenna', '.', 'antenna', '=', 'data', '[', "'antenna_id'", ']', 'if', "'place_id'", 'in', 'data', ':', 'raise', 'NameError', '(', '"Use field name \'antenna_id\' in input files. "', '"\'place_id\' is deprecated."', ')', 'if', "'latitude'", 'in', 'data', 'and', "'longitude'", 'in', 'data', ':', 'latitude', '=', 'data', '[', "'latitude'", ']', 'longitude', '=', 'data', '[', "'longitude'", ']', '# latitude and longitude should not be empty strings.', 'if', 'latitude', 'and', 'longitude', ':', 'antenna', '.', 'location', '=', 'float', '(', 'latitude', ')', ',', 'float', '(', 'longitude', ')', 'return', 'antenna', 'return', 'Record', '(', 'interaction', '=', 'data', '[', "'interaction'", ']', 'if', 'data', '[', "'interaction'", ']', 'else', 'None', ',', 'direction', '=', 'data', '[', "'direction'", ']', ',', 'correspondent_id', '=', 'data', '[', "'correspondent_id'", ']', ',', 'datetime', '=', '_tryto', '(', 'lambda', 'x', ':', 'datetime', '.', 'strptime', '(', 'x', ',', '"%Y-%m-%d %H:%M:%S"', ')', ',', 'data', '[', "'datetime'", ']', ')', ',', 'call_duration', '=', '_tryto', '(', '_map_duration', ',', 'data', '[', "'call_duration'", ']', ')', ',', 'position', '=', '_tryto', '(', '_map_position', ',', 'data', ')', ')']
Parse a raw data dictionary and return a Record object.
['Parse', 'a', 'raw', 'data', 'dictionary', 'and', 'return', 'a', 'Record', 'object', '.']
train
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L147-L187
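An illustrative input dictionary for the parser above, using the exact keys the function reads; all values are made up. The parser itself is private to bandicoot.io, so the actual call is shown commented out.

raw = {
    'interaction': 'call',
    'direction': 'out',
    'correspondent_id': '8f9a3b',
    'datetime': '2014-06-01 13:45:02',
    'call_duration': '137',            # interpreted as seconds with the default duration_format
    'antenna_id': '702',
    'latitude': '42.361',
    'longitude': '-71.057',
}
# record = _parse_record(raw)
# -> bandicoot Record with a Position(antenna='702', location=(42.361, -71.057))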
5,395
MisterY/pydatum
pydatum/datum.py
Datum.subtract_months
def subtract_months(self, months: int) -> datetime:
    """ Subtracts a number of months from the current value """
    self.value = self.value - relativedelta(months=months)
    return self.value
python
def subtract_months(self, months: int) -> datetime:
    """ Subtracts a number of months from the current value """
    self.value = self.value - relativedelta(months=months)
    return self.value
['def', 'subtract_months', '(', 'self', ',', 'months', ':', 'int', ')', '->', 'datetime', ':', 'self', '.', 'value', '=', 'self', '.', 'value', '-', 'relativedelta', '(', 'months', '=', 'months', ')', 'return', 'self', '.', 'value']
Subtracts a number of months from the current value
['Subtracts', 'a', 'number', 'of', 'months', 'from', 'the', 'current', 'value']
train
https://github.com/MisterY/pydatum/blob/4b39f43040e31a95bcf219603b6429078a9ba3c2/pydatum/datum.py#L172-L175
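A minimal usage sketch. The import path follows the record's file path (pydatum/datum.py), and it is assumed that a freshly constructed Datum holds the current date/time in .value.

from pydatum.datum import Datum

datum = Datum()
print(datum.value)                 # assumed to start at the current date/time
print(datum.subtract_months(3))    # returns the new value and also updates datum.value in place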
5,396
materialsproject/pymatgen
pymatgen/ext/matproj.py
MPRester.get_reaction
def get_reaction(self, reactants, products):
    """
    Gets a reaction from the Materials Project.

    Args:
        reactants ([str]): List of formulas
        products ([str]): List of formulas

    Returns:
        rxn
    """
    return self._make_request("/reaction",
                              payload={"reactants[]": reactants,
                                       "products[]": products},
                              mp_decode=False)
python
def get_reaction(self, reactants, products):
    """
    Gets a reaction from the Materials Project.

    Args:
        reactants ([str]): List of formulas
        products ([str]): List of formulas

    Returns:
        rxn
    """
    return self._make_request("/reaction",
                              payload={"reactants[]": reactants,
                                       "products[]": products},
                              mp_decode=False)
['def', 'get_reaction', '(', 'self', ',', 'reactants', ',', 'products', ')', ':', 'return', 'self', '.', '_make_request', '(', '"/reaction"', ',', 'payload', '=', '{', '"reactants[]"', ':', 'reactants', ',', '"products[]"', ':', 'products', '}', ',', 'mp_decode', '=', 'False', ')']
Gets a reaction from the Materials Project.

Args:
    reactants ([str]): List of formulas
    products ([str]): List of formulas

Returns:
    rxn
['Gets', 'a', 'reaction', 'from', 'the', 'Materials', 'Project', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/ext/matproj.py#L1089-L1102
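A short usage sketch for the method above. Querying the Materials Project requires a valid API key; "YOUR_API_KEY" and the chosen formulas are placeholders.

from pymatgen.ext.matproj import MPRester

with MPRester("YOUR_API_KEY") as mpr:
    # Same reactant/product formula lists the endpoint expects.
    rxn = mpr.get_reaction(reactants=["Li", "O2"], products=["Li2O2"])
    print(rxn)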
5,397
saltstack/salt
salt/modules/chocolatey.py
install_gem
def install_gem(name, version=None, install_args=None, override_args=False):
    '''
    Instructs Chocolatey to install a package via Ruby's Gems.

    name
        The name of the package to be installed. Only accepts a single
        argument.

    version
        Install a specific version of the package. Defaults to latest version
        available.

    install_args
        A list of install arguments you want to pass to the installation
        process i.e product key or feature list

    override_args
        Set to true if you want to override the original install arguments
        (for the native installer) in the package and use your own. When this
        is set to False install_args will be appended to the end of the
        default arguments

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_gem <package name>
        salt '*' chocolatey.install_gem <package name> version=<package version>
        salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
    '''
    return install(name,
                   version=version,
                   source='ruby',
                   install_args=install_args,
                   override_args=override_args)
python
def install_gem(name, version=None, install_args=None, override_args=False):
    '''
    Instructs Chocolatey to install a package via Ruby's Gems.

    name
        The name of the package to be installed. Only accepts a single
        argument.

    version
        Install a specific version of the package. Defaults to latest version
        available.

    install_args
        A list of install arguments you want to pass to the installation
        process i.e product key or feature list

    override_args
        Set to true if you want to override the original install arguments
        (for the native installer) in the package and use your own. When this
        is set to False install_args will be appended to the end of the
        default arguments

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_gem <package name>
        salt '*' chocolatey.install_gem <package name> version=<package version>
        salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
    '''
    return install(name,
                   version=version,
                   source='ruby',
                   install_args=install_args,
                   override_args=override_args)
['def', 'install_gem', '(', 'name', ',', 'version', '=', 'None', ',', 'install_args', '=', 'None', ',', 'override_args', '=', 'False', ')', ':', 'return', 'install', '(', 'name', ',', 'version', '=', 'version', ',', 'source', '=', "'ruby'", ',', 'install_args', '=', 'install_args', ',', 'override_args', '=', 'override_args', ')']
Instructs Chocolatey to install a package via Ruby's Gems.

name
    The name of the package to be installed. Only accepts a single argument.

version
    Install a specific version of the package. Defaults to latest version available.

install_args
    A list of install arguments you want to pass to the installation process i.e product key or feature list

override_args
    Set to true if you want to override the original install arguments (for the native installer) in the package and use your own. When this is set to False install_args will be appended to the end of the default arguments

CLI Example:

.. code-block:: bash

    salt '*' chocolatey.install_gem <package name>
    salt '*' chocolatey.install_gem <package name> version=<package version>
    salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
['Instructs', 'Chocolatey', 'to', 'install', 'a', 'package', 'via', 'Ruby', 's', 'Gems', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/chocolatey.py#L526-L560
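The docstring above already shows the remote salt invocation; a sketch of calling the same execution module from Python via Salt's LocalClient is below. It must run on the salt master, and the minion id, package name, and version are placeholders.

import salt.client

local = salt.client.LocalClient()
result = local.cmd('winminion', 'chocolatey.install_gem',
                   ['bundler'], kwarg={'version': '2.5.6'})
print(result)   # per-minion return data from the chocolatey install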
5,398
globus/globus-cli
globus_cli/commands/endpoint/show.py
endpoint_show
def endpoint_show(endpoint_id):
    """
    Executor for `globus endpoint show`
    """
    client = get_client()

    res = client.get_endpoint(endpoint_id)

    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
    )
python
def endpoint_show(endpoint_id):
    """
    Executor for `globus endpoint show`
    """
    client = get_client()

    res = client.get_endpoint(endpoint_id)

    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
    )
['def', 'endpoint_show', '(', 'endpoint_id', ')', ':', 'client', '=', 'get_client', '(', ')', 'res', '=', 'client', '.', 'get_endpoint', '(', 'endpoint_id', ')', 'formatted_print', '(', 'res', ',', 'text_format', '=', 'FORMAT_TEXT_RECORD', ',', 'fields', '=', 'GCP_FIELDS', 'if', 'res', '[', '"is_globus_connect"', ']', 'else', 'STANDARD_FIELDS', ',', ')']
Executor for `globus endpoint show`
['Executor', 'for', 'globus', 'endpoint', 'show']
train
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/endpoint/show.py#L11-L23
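The CLI command above wraps a Globus SDK call; a sketch of the underlying SDK usage is below. Authentication setup for the TransferClient is elided, and the endpoint UUID is a placeholder.

import globus_sdk

def show_endpoint(transfer_client: globus_sdk.TransferClient, endpoint_id: str):
    # Same call the CLI makes before formatting the output fields.
    doc = transfer_client.get_endpoint(endpoint_id)
    print(doc["display_name"], doc["is_globus_connect"])

# show_endpoint(tc, "ddb59aef-6d04-11e5-ba46-22000b92c6ec")  # hypothetical endpoint UUID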
5,399
pennersr/django-allauth
allauth/socialaccount/providers/oauth/client.py
OAuth.query
def query(self, url, method="GET", params=dict(), headers=dict()):
    """
    Request a API endpoint at ``url`` with ``params`` being either the
    POST or GET data.
    """
    access_token = self._get_at_from_session()
    oauth = OAuth1(
        self.consumer_key,
        client_secret=self.secret_key,
        resource_owner_key=access_token['oauth_token'],
        resource_owner_secret=access_token['oauth_token_secret'])
    response = getattr(requests, method.lower())(url,
                                                 auth=oauth,
                                                 headers=headers,
                                                 params=params)
    if response.status_code != 200:
        raise OAuthError(
            _('No access to private resources at "%s".')
            % get_token_prefix(self.request_token_url))
    return response.text
python
def query(self, url, method="GET", params=dict(), headers=dict()):
    """
    Request a API endpoint at ``url`` with ``params`` being either the
    POST or GET data.
    """
    access_token = self._get_at_from_session()
    oauth = OAuth1(
        self.consumer_key,
        client_secret=self.secret_key,
        resource_owner_key=access_token['oauth_token'],
        resource_owner_secret=access_token['oauth_token_secret'])
    response = getattr(requests, method.lower())(url,
                                                 auth=oauth,
                                                 headers=headers,
                                                 params=params)
    if response.status_code != 200:
        raise OAuthError(
            _('No access to private resources at "%s".')
            % get_token_prefix(self.request_token_url))
    return response.text
['def', 'query', '(', 'self', ',', 'url', ',', 'method', '=', '"GET"', ',', 'params', '=', 'dict', '(', ')', ',', 'headers', '=', 'dict', '(', ')', ')', ':', 'access_token', '=', 'self', '.', '_get_at_from_session', '(', ')', 'oauth', '=', 'OAuth1', '(', 'self', '.', 'consumer_key', ',', 'client_secret', '=', 'self', '.', 'secret_key', ',', 'resource_owner_key', '=', 'access_token', '[', "'oauth_token'", ']', ',', 'resource_owner_secret', '=', 'access_token', '[', "'oauth_token_secret'", ']', ')', 'response', '=', 'getattr', '(', 'requests', ',', 'method', '.', 'lower', '(', ')', ')', '(', 'url', ',', 'auth', '=', 'oauth', ',', 'headers', '=', 'headers', ',', 'params', '=', 'params', ')', 'if', 'response', '.', 'status_code', '!=', '200', ':', 'raise', 'OAuthError', '(', '_', '(', '\'No access to private resources at "%s".\'', ')', '%', 'get_token_prefix', '(', 'self', '.', 'request_token_url', ')', ')', 'return', 'response', '.', 'text']
Request a API endpoint at ``url`` with ``params`` being either the POST or GET data.
['Request', 'a', 'API', 'endpoint', 'at', 'url', 'with', 'params', 'being', 'either', 'the', 'POST', 'or', 'GET', 'data', '.']
train
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/socialaccount/providers/oauth/client.py#L180-L200
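The wrapper above delegates to requests plus requests_oauthlib; a standalone equivalent of the signed request is sketched below. All keys, tokens, the URL, and the query parameter are placeholders.

import requests
from requests_oauthlib import OAuth1

oauth = OAuth1(
    'consumer-key',
    client_secret='consumer-secret',
    resource_owner_key='oauth-token',
    resource_owner_secret='oauth-token-secret')
response = requests.get('https://api.example.com/1.1/account/verify_credentials.json',
                        auth=oauth, params={'include_email': 'true'})
print(response.status_code, response.text[:200])   # non-200 means no access to the private resource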