code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def _to_ascii(s):
""" Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
"""
# TODO: Always use unicode within ambry.
from six import text_type, binary_type
if isinstance(s, text_type):
ascii_ = s.encode('ascii', 'ignore')
elif isinstance(s, binary_type):
ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
else:
raise Exception('Unknown text type - {}'.format(type(s)))
return ascii_
|
Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
|
Below is the instruction that describes the task:
### Input:
Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
### Response:
def _to_ascii(s):
""" Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
"""
# TODO: Always use unicode within ambry.
from six import text_type, binary_type
if isinstance(s, text_type):
ascii_ = s.encode('ascii', 'ignore')
elif isinstance(s, binary_type):
ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
else:
raise Exception('Unknown text type - {}'.format(type(s)))
return ascii_
|
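A minimal usage sketch for _to_ascii above (assuming six is installed); note that on Python 3 both branches return bytes rather than str, despite the docstring:
print(_to_ascii(u'café'))                   # b'caf'  - non-ASCII characters are dropped
print(_to_ascii('café'.encode('utf-8')))    # b'caf'  - bytes input is decoded as UTF-8 first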
def request_param_update(self, var_id):
"""Place a param update request on the queue"""
self._useV2 = self.cf.platform.get_protocol_version() >= 4
pk = CRTPPacket()
pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
if self._useV2:
pk.data = struct.pack('<H', var_id)
else:
pk.data = struct.pack('<B', var_id)
logger.debug('Requesting request to update param [%d]', var_id)
self.request_queue.put(pk)
|
Place a param update request on the queue
|
Below is the instruction that describes the task:
### Input:
Place a param update request on the queue
### Response:
def request_param_update(self, var_id):
"""Place a param update request on the queue"""
self._useV2 = self.cf.platform.get_protocol_version() >= 4
pk = CRTPPacket()
pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
if self._useV2:
pk.data = struct.pack('<H', var_id)
else:
pk.data = struct.pack('<B', var_id)
logger.debug('Requesting request to update param [%d]', var_id)
self.request_queue.put(pk)
|
def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list)
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k)
context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list))
|
Remove jobs from a raw queue with their raw params.
|
Below is the instruction that describes the task:
### Input:
Remove jobs from a raw queue with their raw params.
### Response:
def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list)
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k)
context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list))
|
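A hedged sketch of the three removal strategies this method dispatches between, written directly against redis-py (assumes redis-py >= 3.0 and a reachable local Redis; the key names are hypothetical):
import redis
r = redis.Redis()
params = ['job-a', 'job-b']
r.zrem('raw:sorted_queue', *params)      # ZSET-backed queue: remove members by value
r.srem('raw:set_queue', *params)         # SET-backed queue: remove members by value
for p in params:                         # plain LIST queue: one O(n) LREM per param
    r.lrem('raw:list_queue', 1, p)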
def find_rmlst_type(kma_report, rmlst_report):
"""
Uses a report generated by KMA to determine what allele is present for each rMLST gene.
:param kma_report: The .res report generated by KMA.
:param rmlst_report: rMLST report file to write information to.
:return: a sorted list of loci present, in format gene_allele
"""
genes_to_use = dict()
score_dict = dict()
gene_alleles = list()
with open(kma_report) as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t')
for row in reader:
gene_allele = row['#Template']
score = int(row['Score'])
gene = gene_allele.split('_')[0]
allele = gene_allele.split('_')[1]
if gene not in score_dict:
score_dict[gene] = score
genes_to_use[gene] = allele
else:
if score > score_dict[gene]:
score_dict[gene] = score
genes_to_use[gene] = allele
for gene in genes_to_use:
gene_alleles.append(gene + '_' + genes_to_use[gene].replace(' ', ''))
gene_alleles = sorted(gene_alleles)
with open(rmlst_report, 'w') as f:
f.write('Gene,Allele\n')
for gene_allele in gene_alleles:
gene = gene_allele.split('_')[0]
allele = gene_allele.split('_')[1]
f.write('{},{}\n'.format(gene, allele))
return gene_alleles
|
Uses a report generated by KMA to determine what allele is present for each rMLST gene.
:param kma_report: The .res report generated by KMA.
:param rmlst_report: rMLST report file to write information to.
:return: a sorted list of loci present, in format gene_allele
|
Below is the instruction that describes the task:
### Input:
Uses a report generated by KMA to determine what allele is present for each rMLST gene.
:param kma_report: The .res report generated by KMA.
:param rmlst_report: rMLST report file to write information to.
:return: a sorted list of loci present, in format gene_allele
### Response:
def find_rmlst_type(kma_report, rmlst_report):
"""
Uses a report generated by KMA to determine what allele is present for each rMLST gene.
:param kma_report: The .res report generated by KMA.
:param rmlst_report: rMLST report file to write information to.
:return: a sorted list of loci present, in format gene_allele
"""
genes_to_use = dict()
score_dict = dict()
gene_alleles = list()
with open(kma_report) as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t')
for row in reader:
gene_allele = row['#Template']
score = int(row['Score'])
gene = gene_allele.split('_')[0]
allele = gene_allele.split('_')[1]
if gene not in score_dict:
score_dict[gene] = score
genes_to_use[gene] = allele
else:
if score > score_dict[gene]:
score_dict[gene] = score
genes_to_use[gene] = allele
for gene in genes_to_use:
gene_alleles.append(gene + '_' + genes_to_use[gene].replace(' ', ''))
gene_alleles = sorted(gene_alleles)
with open(rmlst_report, 'w') as f:
f.write('Gene,Allele\n')
for gene_allele in gene_alleles:
gene = gene_allele.split('_')[0]
allele = gene_allele.split('_')[1]
f.write('{},{}\n'.format(gene, allele))
return gene_alleles
|
def populate_parallel_text(extract_dir: str,
file_sets: List[Tuple[str, str, str]],
dest_prefix: str,
keep_separate: bool,
head_n: int = 0):
"""
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
"""
source_out = None # type: IO[Any]
target_out = None # type: IO[Any]
lines_written = 0
# Single output file for each side
if not keep_separate:
source_dest = dest_prefix + SUFFIX_SRC_GZ
target_dest = dest_prefix + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for i, (source_fname, target_fname, text_type) in enumerate(file_sets):
# One output file per input file for each side
if keep_separate:
if source_out:
source_out.close()
if target_out:
target_out.close()
source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for source_line, target_line in zip(
plain_text_iter(os.path.join(extract_dir, source_fname), text_type, DATA_SRC),
plain_text_iter(os.path.join(extract_dir, target_fname), text_type, DATA_TRG)):
# Only write N lines total if requested, but reset per file when
# keeping files separate
if head_n > 0 and lines_written >= head_n:
if keep_separate:
lines_written = 0
break
source_out.write("{}\n".format(source_line))
target_out.write("{}\n".format(target_line))
lines_written += 1
source_out.close()
target_out.close()
|
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
|
Below is the instruction that describes the task:
### Input:
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
### Response:
def populate_parallel_text(extract_dir: str,
file_sets: List[Tuple[str, str, str]],
dest_prefix: str,
keep_separate: bool,
head_n: int = 0):
"""
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
"""
source_out = None # type: IO[Any]
target_out = None # type: IO[Any]
lines_written = 0
# Single output file for each side
if not keep_separate:
source_dest = dest_prefix + SUFFIX_SRC_GZ
target_dest = dest_prefix + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for i, (source_fname, target_fname, text_type) in enumerate(file_sets):
# One output file per input file for each side
if keep_separate:
if source_out:
source_out.close()
if target_out:
target_out.close()
source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for source_line, target_line in zip(
plain_text_iter(os.path.join(extract_dir, source_fname), text_type, DATA_SRC),
plain_text_iter(os.path.join(extract_dir, target_fname), text_type, DATA_TRG)):
# Only write N lines total if requested, but reset per file when
# keeping files separate
if head_n > 0 and lines_written >= head_n:
if keep_separate:
lines_written = 0
break
source_out.write("{}\n".format(source_line))
target_out.write("{}\n".format(target_line))
lines_written += 1
source_out.close()
target_out.close()
|
def _rdf2dot_simple(g, stream):
"""Create a simple graph of processes and artifacts."""
from itertools import chain
import re
path_re = re.compile(
r'file:///(?P<type>[a-zA-Z]+)/'
r'(?P<commit>\w+)'
r'(?P<path>.+)?'
)
inputs = g.query(
"""
SELECT ?input ?role ?activity ?comment
WHERE {
?activity (prov:qualifiedUsage/prov:entity) ?input .
?activity prov:qualifiedUsage ?qual .
?qual prov:hadRole ?role .
?qual prov:entity ?input .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun .
?activity rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
outputs = g.query(
"""
SELECT ?activity ?role ?output ?comment
WHERE {
?output (prov:qualifiedGeneration/prov:activity) ?activity .
?output prov:qualifiedGeneration ?qual .
?qual prov:hadRole ?role .
?qual prov:activity ?activity .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun ;
rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
activity_nodes = {}
artifact_nodes = {}
for source, role, target, comment, in chain(inputs, outputs):
# extract the pieces of the process URI
src_path = path_re.match(source).groupdict()
tgt_path = path_re.match(target).groupdict()
# write the edge
stream.write(
'\t"{src_commit}:{src_path}" -> '
'"{tgt_commit}:{tgt_path}" '
'[label={role}] \n'.format(
src_commit=src_path['commit'][:5],
src_path=src_path.get('path') or '',
tgt_commit=tgt_path['commit'][:5],
tgt_path=tgt_path.get('path') or '',
role=role
)
)
if src_path.get('type') == 'commit':
activity_nodes.setdefault(source, {'comment': comment})
artifact_nodes.setdefault(target, {})
if tgt_path.get('type') == 'commit':
activity_nodes.setdefault(target, {'comment': comment})
artifact_nodes.setdefault(source, {})
# customize the nodes
for node, content in activity_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[shape=box label="#{commit}:{path}:{comment}"] \n'.format(
comment=content['comment'],
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
for node, content in artifact_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[label="#{commit}:{path}"] \n'.format(
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
stream.write('}\n')
|
Create a simple graph of processes and artifacts.
|
Below is the instruction that describes the task:
### Input:
Create a simple graph of processes and artifacts.
### Response:
def _rdf2dot_simple(g, stream):
"""Create a simple graph of processes and artifacts."""
from itertools import chain
import re
path_re = re.compile(
r'file:///(?P<type>[a-zA-Z]+)/'
r'(?P<commit>\w+)'
r'(?P<path>.+)?'
)
inputs = g.query(
"""
SELECT ?input ?role ?activity ?comment
WHERE {
?activity (prov:qualifiedUsage/prov:entity) ?input .
?activity prov:qualifiedUsage ?qual .
?qual prov:hadRole ?role .
?qual prov:entity ?input .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun .
?activity rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
outputs = g.query(
"""
SELECT ?activity ?role ?output ?comment
WHERE {
?output (prov:qualifiedGeneration/prov:activity) ?activity .
?output prov:qualifiedGeneration ?qual .
?qual prov:hadRole ?role .
?qual prov:activity ?activity .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun ;
rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
activity_nodes = {}
artifact_nodes = {}
for source, role, target, comment, in chain(inputs, outputs):
# extract the pieces of the process URI
src_path = path_re.match(source).groupdict()
tgt_path = path_re.match(target).groupdict()
# write the edge
stream.write(
'\t"{src_commit}:{src_path}" -> '
'"{tgt_commit}:{tgt_path}" '
'[label={role}] \n'.format(
src_commit=src_path['commit'][:5],
src_path=src_path.get('path') or '',
tgt_commit=tgt_path['commit'][:5],
tgt_path=tgt_path.get('path') or '',
role=role
)
)
if src_path.get('type') == 'commit':
activity_nodes.setdefault(source, {'comment': comment})
artifact_nodes.setdefault(target, {})
if tgt_path.get('type') == 'commit':
activity_nodes.setdefault(target, {'comment': comment})
artifact_nodes.setdefault(source, {})
# customize the nodes
for node, content in activity_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[shape=box label="#{commit}:{path}:{comment}"] \n'.format(
comment=content['comment'],
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
for node, content in artifact_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[label="#{commit}:{path}"] \n'.format(
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
stream.write('}\n')
|
def cronitor(self):
"""Wrap run with requests to cronitor."""
url = f'https://cronitor.link/{self.opts.cronitor}/{{}}'
try:
run_url = url.format('run')
self.logger.debug(f'Pinging {run_url}')
requests.get(run_url, timeout=self.opts.timeout)
except requests.exceptions.RequestException as e:
self.logger.exception(e)
# Cronitor may be having an outage, but we still want to run our stuff
output, exit_status = self.run()
endpoint = 'complete' if exit_status == 0 else 'fail'
try:
ping_url = url.format(endpoint)
self.logger.debug('Pinging {}'.format(ping_url))
requests.get(ping_url, timeout=self.opts.timeout)
except requests.exceptions.RequestException as e:
self.logger.exception(e)
return output, exit_status
|
Wrap run with requests to cronitor.
|
Below is the instruction that describes the task:
### Input:
Wrap run with requests to cronitor.
### Response:
def cronitor(self):
"""Wrap run with requests to cronitor."""
url = f'https://cronitor.link/{self.opts.cronitor}/{{}}'
try:
run_url = url.format('run')
self.logger.debug(f'Pinging {run_url}')
requests.get(run_url, timeout=self.opts.timeout)
except requests.exceptions.RequestException as e:
self.logger.exception(e)
# Cronitor may be having an outage, but we still want to run our stuff
output, exit_status = self.run()
endpoint = 'complete' if exit_status == 0 else 'fail'
try:
ping_url = url.format(endpoint)
self.logger.debug('Pinging {}'.format(ping_url))
requests.get(ping_url, timeout=self.opts.timeout)
except requests.exceptions.RequestException as e:
self.logger.exception(e)
return output, exit_status
|
def create_vlan_interface(self, interface, subnet, **kwargs):
"""Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
"""
data = {"subnet": subnet}
data.update(kwargs)
return self._request("POST", "network/vif/{0}".format(interface), data)
|
Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
|
Below is the instruction that describes the task:
### Input:
Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
### Response:
def create_vlan_interface(self, interface, subnet, **kwargs):
"""Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
"""
data = {"subnet": subnet}
data.update(kwargs)
return self._request("POST", "network/vif/{0}".format(interface), data)
|
def synset(self):
"""Returns synset into which the given lemma belongs to.
Returns
-------
Synset
Synset to which the given lemma belongs.
"""
return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal))
|
Returns the synset to which the given lemma belongs.
Returns
-------
Synset
Synset to which the given lemma belongs.
|
Below is the instruction that describes the task:
### Input:
Returns the synset to which the given lemma belongs.
Returns
-------
Synset
Synset to which the given lemma belongs.
### Response:
def synset(self):
"""Returns synset into which the given lemma belongs to.
Returns
-------
Synset
Synset to which the given lemma belongs.
"""
return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal))
|
def write_render_callable(self, node, name, args, buffered, filtered,
cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_toplevel(%s)" % decorator)
self.printer.start_source(node.lineno)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(
self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write_blanks(2)
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
|
write a top-level render callable.
this could be the main render() method or that of a top-level def.
|
Below is the instruction that describes the task:
### Input:
write a top-level render callable.
this could be the main render() method or that of a top-level def.
### Response:
def write_render_callable(self, node, name, args, buffered, filtered,
cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_toplevel(%s)" % decorator)
self.printer.start_source(node.lineno)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(
self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write_blanks(2)
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
|
def _get_on_reboot(dom):
'''
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_reboot')
return node.text if node is not None else ''
|
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
|
Below is the instruction that describes the task:
### Input:
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
### Response:
def _get_on_reboot(dom):
'''
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_reboot')
return node.text if node is not None else ''
|
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
return cls.from_yamlstring(fp.read(), selector_handler=selector_handler, strict=strict, debug=debug)
|
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
|
Below is the instruction that describes the task:
### Input:
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
### Response:
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
return cls.from_yamlstring(fp.read(), selector_handler=selector_handler, strict=strict, debug=debug)
|
def gfstep(time):
"""
Return the time step set by the most recent call to :func:`gfsstp`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
:param time: Ignored ET value.
:type time: float
:return: Time step to take.
:rtype: float
"""
time = ctypes.c_double(time)
step = ctypes.c_double()
libspice.gfstep_c(time, ctypes.byref(step))
return step.value
|
Return the time step set by the most recent call to :func:`gfsstp`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
:param time: Ignored ET value.
:type time: float
:return: Time step to take.
:rtype: float
|
Below is the instruction that describes the task:
### Input:
Return the time step set by the most recent call to :func:`gfsstp`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
:param time: Ignored ET value.
:type time: float
:return: Time step to take.
:rtype: float
### Response:
def gfstep(time):
"""
Return the time step set by the most recent call to :func:`gfsstp`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfstep_c.html
:param time: Ignored ET value.
:type time: float
:return: Time step to take.
:rtype: float
"""
time = ctypes.c_double(time)
step = ctypes.c_double()
libspice.gfstep_c(time, ctypes.byref(step))
return step.value
|
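A minimal sketch of the paired setter/getter (assuming spiceypy is installed; gfsstp is the companion routine the docstring refers to):
import spiceypy as spice
spice.gfsstp(30.0)         # set the search step to 30 seconds
print(spice.gfstep(0.0))   # 30.0 (the ET argument is ignored)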
def to_json(self, depth=-1, **kwargs):
"""Returns a JSON representation of the object."""
return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
|
Returns a JSON representation of the object.
|
Below is the instruction that describes the task:
### Input:
Returns a JSON representation of the object.
### Response:
def to_json(self, depth=-1, **kwargs):
"""Returns a JSON representation of the object."""
return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
|
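A sketch of the interface this method assumes: the host object only needs a to_dict(depth=..., ordered=...) method. The class below is hypothetical:
import json
class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []
    def to_dict(self, depth=-1, ordered=True):
        if depth == 0:
            return {'name': self.name}
        return {'name': self.name,
                'children': [c.to_dict(depth - 1, ordered) for c in self.children]}
    def to_json(self, depth=-1, **kwargs):
        return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
print(Node('root', [Node('leaf')]).to_json(indent=2))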
def to_date(ts: float) -> datetime.date:
"""Convert timestamp to date.
>>> to_date(978393600.0)
datetime.date(2001, 1, 2)
"""
return datetime.datetime.fromtimestamp(
ts, tz=datetime.timezone.utc).date()
|
Convert timestamp to date.
>>> to_date(978393600.0)
datetime.date(2001, 1, 2)
|
Below is the instruction that describes the task:
### Input:
Convert timestamp to date.
>>> to_date(978393600.0)
datetime.date(2001, 1, 2)
### Response:
def to_date(ts: float) -> datetime.date:
"""Convert timestamp to date.
>>> to_date(978393600.0)
datetime.date(2001, 1, 2)
"""
return datetime.datetime.fromtimestamp(
ts, tz=datetime.timezone.utc).date()
|
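The tz=datetime.timezone.utc argument is what makes the result machine-independent; a quick check (assuming to_date is defined as above):
import datetime
ts = 978393600.0
print(to_date(ts))                                  # 2001-01-02 regardless of local timezone
print(datetime.datetime.fromtimestamp(ts).date())   # may differ, because this uses local time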
def get_ticker(self):
"""Return the latest ticker information.
:return: Latest ticker information.
:rtype: dict
"""
self._log('get ticker')
return self._rest_client.get(
endpoint='/ticker',
params={'book': self.name}
)
|
Return the latest ticker information.
:return: Latest ticker information.
:rtype: dict
|
Below is the instruction that describes the task:
### Input:
Return the latest ticker information.
:return: Latest ticker information.
:rtype: dict
### Response:
def get_ticker(self):
"""Return the latest ticker information.
:return: Latest ticker information.
:rtype: dict
"""
self._log('get ticker')
return self._rest_client.get(
endpoint='/ticker',
params={'book': self.name}
)
|
def addnot(self, action=None, subject=None, **conditions):
"""
Defines an ability which cannot be done.
"""
self.add_rule(Rule(False, action, subject, **conditions))
|
Defines an ability which cannot be done.
|
Below is the instruction that describes the task:
### Input:
Defines an ability which cannot be done.
### Response:
def addnot(self, action=None, subject=None, **conditions):
"""
Defines an ability which cannot be done.
"""
self.add_rule(Rule(False, action, subject, **conditions))
|
def attribute_invoked(self, sender, name, args, kwargs):
"Handles the creation of ExpectationBuilder when an attribute is invoked."
return ExpectationBuilder(self.sender, self.delegate, self.add_invocation, self.add_expectations, '__call__')(*args, **kwargs)
|
Handles the creation of ExpectationBuilder when an attribute is invoked.
|
Below is the instruction that describes the task:
### Input:
Handles the creation of ExpectationBuilder when an attribute is invoked.
### Response:
def attribute_invoked(self, sender, name, args, kwargs):
"Handles the creation of ExpectationBuilder when an attribute is invoked."
return ExpectationBuilder(self.sender, self.delegate, self.add_invocation, self.add_expectations, '__call__')(*args, **kwargs)
|
def createproject(self, name, **kwargs):
"""
Creates a new project owned by the authenticated user.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param namespace_id: namespace for the new project (defaults to user)
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:param sudo:
:param import_url:
:return:
"""
data = {'name': name}
if kwargs:
data.update(kwargs)
request = requests.post(
self.projects_url, headers=self.headers, data=data,
verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
elif request.status_code == 403:
if 'Your own projects limit is 0' in request.text:
print(request.text)
return False
else:
return False
|
Creates a new project owned by the authenticated user.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param namespace_id: namespace for the new project (defaults to user)
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:param sudo:
:param import_url:
:return:
|
Below is the instruction that describes the task:
### Input:
Creates a new project owned by the authenticated user.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param namespace_id: namespace for the new project (defaults to user)
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:param sudo:
:param import_url:
:return:
### Response:
def createproject(self, name, **kwargs):
"""
Creates a new project owned by the authenticated user.
:param name: new project name
:param path: custom repository name for new project. By default generated based on name
:param namespace_id: namespace for the new project (defaults to user)
:param description: short project description
:param issues_enabled:
:param merge_requests_enabled:
:param wiki_enabled:
:param snippets_enabled:
:param public: if true same as setting visibility_level = 20
:param visibility_level:
:param sudo:
:param import_url:
:return:
"""
data = {'name': name}
if kwargs:
data.update(kwargs)
request = requests.post(
self.projects_url, headers=self.headers, data=data,
verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
elif request.status_code == 403:
if 'Your own projects limit is 0' in request.text:
print(request.text)
return False
else:
return False
|
def make_reading_comprehension_instance_quac(question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields['passage'] = passage_field
fields['question'] = ListField([TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens])
metadata = {'original_passage': passage_text,
'token_offsets': passage_offsets,
'question_tokens': [[token.text for token in question_tokens] \
for question_tokens in question_list_tokens],
'passage_tokens': [token.text for token in passage_tokens], }
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except:
raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(prev_answer_distance, "in")
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
# Looping each <<answers>>.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [["O"] * len(passage_tokens), ["O"] * len(passage_tokens),
["O"] * len(passage_tokens), ["O"] * len(passage_tokens)]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[3],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 1:
p2_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[2],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 0:
p1_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[1],
passage_field,
label_namespace="answer_tags"))
fields['span_start'] = ListField(span_start_list)
fields['span_end'] = ListField(span_end_list)
if num_context_answers > 0:
fields['p1_answer_marker'] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields['p2_answer_marker'] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields['p3_answer_marker'] = ListField(p3_answer_marker_list)
fields['yesno_list'] = ListField( \
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list])
fields['followup_list'] = ListField([LabelField(followup, label_namespace="followup_labels") \
for followup in followup_list])
metadata.update(additional_metadata)
fields['metadata'] = MetadataField(metadata)
return Instance(fields)
|
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
|
Below is the instruction that describes the task:
### Input:
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
### Response:
def make_reading_comprehension_instance_quac(question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields['passage'] = passage_field
fields['question'] = ListField([TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens])
metadata = {'original_passage': passage_text,
'token_offsets': passage_offsets,
'question_tokens': [[token.text for token in question_tokens] \
for question_tokens in question_list_tokens],
'passage_tokens': [token.text for token in passage_tokens], }
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except:
raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(prev_answer_distance, "in")
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
# Looping each <<answers>>.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [["O"] * len(passage_tokens), ["O"] * len(passage_tokens),
["O"] * len(passage_tokens), ["O"] * len(passage_tokens)]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[3],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 1:
p2_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[2],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 0:
p1_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[1],
passage_field,
label_namespace="answer_tags"))
fields['span_start'] = ListField(span_start_list)
fields['span_end'] = ListField(span_end_list)
if num_context_answers > 0:
fields['p1_answer_marker'] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields['p2_answer_marker'] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields['p3_answer_marker'] = ListField(p3_answer_marker_list)
fields['yesno_list'] = ListField( \
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list])
fields['followup_list'] = ListField([LabelField(followup, label_namespace="followup_labels") \
for followup in followup_list])
metadata.update(additional_metadata)
fields['metadata'] = MetadataField(metadata)
return Instance(fields)
|
def transformSkyCoordinates(self, phi, theta):
"""
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
"""
r=ones_like(phi)
x, y, z = sphericalToCartesian(r, phi, theta)
xrot, yrot, zrot = self.transformCartesianCoordinates(x, y, z)
r, phirot, thetarot = cartesianToSpherical(xrot, yrot, zrot)
return phirot, thetarot
|
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
|
Below is the instruction that describes the task:
### Input:
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
### Response:
def transformSkyCoordinates(self, phi, theta):
"""
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
"""
r=ones_like(phi)
x, y, z = sphericalToCartesian(r, phi, theta)
xrot, yrot, zrot = self.transformCartesianCoordinates(x, y, z)
r, phirot, thetarot = cartesianToSpherical(xrot, yrot, zrot)
return phirot, thetarot
|
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
"""Merge all columns and place text string in widened cell."""
hdridxval = len(self.hdrs) - 1
worksheet.merge_range(row_idx, 0, row_idx, hdridxval, txtstr, fmt)
return row_idx + 1
|
Merge all columns and place text string in widened cell.
|
Below is the instruction that describes the task:
### Input:
Merge all columns and place text string in widened cell.
### Response:
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
"""Merge all columns and place text string in widened cell."""
hdridxval = len(self.hdrs) - 1
worksheet.merge_range(row_idx, 0, row_idx, hdridxval, txtstr, fmt)
return row_idx + 1
|
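A hedged sketch of the underlying xlsxwriter call that this helper wraps (the file name, headers, and text below are hypothetical):
import xlsxwriter
workbook = xlsxwriter.Workbook('report.xlsx')
worksheet = workbook.add_worksheet()
fmt = workbook.add_format({'bold': True, 'align': 'center'})
hdrs = ['Gene', 'Allele', 'Score']   # stands in for self.hdrs
worksheet.merge_range(0, 0, 0, len(hdrs) - 1, 'Summary of results', fmt)
workbook.close()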
def give_dots_yield(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots without numpy using yield.
'''
def x(theta):
return (R-r) * math.cos(theta) + r_*math.cos((R-r) / r * theta)
def y(theta):
return (R-r) * math.sin(theta) - r_*math.sin((R-r) / r * theta)
theta = 0.0
while theta < 2*PI*spins:
yield (x(theta), y(theta))
theta += resolution
|
Generate Spirograph dots without numpy using yield.
|
Below is the instruction that describes the task:
### Input:
Generate Spirograph dots without numpy using yield.
### Response:
def give_dots_yield(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots without numpy using yield.
'''
def x(theta):
return (R-r) * math.cos(theta) + r_*math.cos((R-r) / r * theta)
def y(theta):
return (R-r) * math.sin(theta) - r_*math.sin((R-r) / r * theta)
theta = 0.0
while theta < 2*PI*spins:
yield (x(theta), y(theta))
theta += resolution
|
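A minimal driver, assuming give_dots_yield above lives in a module where PI = math.pi (its default resolution is evaluated as 2*PI/1000 at definition time):
dots = give_dots_yield(R=100, r=35, r_=60, spins=2)
print([next(dots) for _ in range(5)])   # first five (x, y) points on the curve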
def format_page(text):
"""Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
"""
width = max(map(len, text.splitlines()))
page = "+-" + "-" * width + "-+\n"
for line in text.splitlines():
page += "| " + line.ljust(width) + " |\n"
page += "+-" + "-" * width + "-+\n"
return page
|
Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
|
Below is the instruction that describes the task:
### Input:
Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
### Response:
def format_page(text):
"""Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
"""
width = max(map(len, text.splitlines()))
page = "+-" + "-" * width + "-+\n"
for line in text.splitlines():
page += "| " + line.ljust(width) + " |\n"
page += "+-" + "-" * width + "-+\n"
return page
|
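A quick usage example of the framing helper above, with the boxed output it produces for a two-line string.

text = "Status: OK\nUptime: 42 days"
print(format_page(text))
# +-----------------+
# | Status: OK      |
# | Uptime: 42 days |
# +-----------------+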
def categorytree(self, category, depth=5):
""" Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10 """
def __cat_tree_rec(cat, depth, tree, level, categories, links):
""" recursive function to build out the tree """
tree[cat] = dict()
tree[cat]["depth"] = level
tree[cat]["sub-categories"] = dict()
tree[cat]["links"] = list()
tree[cat]["parent-categories"] = list()
parent_cats = list()
if cat not in categories:
tries = 0
while True:
if tries > 10:
raise MediaWikiCategoryTreeError(cat)
try:
pag = self.page("{0}:{1}".format(self.category_prefix, cat))
categories[cat] = pag
parent_cats = categories[cat].categories
links[cat] = self.categorymembers(
cat, results=None, subcategories=True
)
break
except PageError:
raise PageError("{0}:{1}".format(self.category_prefix, cat))
except KeyboardInterrupt:
raise
except Exception:
tries = tries + 1
time.sleep(1)
else:
parent_cats = categories[cat].categories
tree[cat]["parent-categories"].extend(parent_cats)
tree[cat]["links"].extend(links[cat][0])
if depth and level >= depth:
for ctg in links[cat][1]:
tree[cat]["sub-categories"][ctg] = None
else:
for ctg in links[cat][1]:
__cat_tree_rec(
ctg,
depth,
tree[cat]["sub-categories"],
level + 1,
categories,
links,
)
# ###################################
# ### Actual Function Code ###
# ###################################
# make it simple to use both a list or a single category term
if not isinstance(category, list):
cats = [category]
else:
cats = category
# parameter verification
if len(cats) == 1 and (cats[0] is None or cats[0] == ""):
msg = (
"CategoryTree: Parameter 'category' must either "
"be a list of one or more categories or a string; "
"provided: '{}'".format(category)
)
raise ValueError(msg)
if depth is not None and depth < 1:
msg = (
"CategoryTree: Parameter 'depth' must be either None "
"(for the full tree) or be greater than 0"
)
raise ValueError(msg)
results = dict()
categories = dict()
links = dict()
for cat in cats:
if cat is None or cat == "":
continue
__cat_tree_rec(cat, depth, results, 0, categories, links)
return results
|
Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10
|
Below is the instruction that describes the task:
### Input:
Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10
### Response:
def categorytree(self, category, depth=5):
""" Generate the Category Tree for the given categories
Args:
category(str or list of strings): Category name(s)
depth(int): Depth to traverse the tree
Returns:
dict: Category tree structure
Note:
Set depth to **None** to get the whole tree
Note:
Return Data Structure: Subcategory contains the same \
recursive structure
>>> {
'category': {
'depth': Number,
'links': list,
'parent-categories': list,
'sub-categories': dict
}
}
.. versionadded:: 0.3.10 """
def __cat_tree_rec(cat, depth, tree, level, categories, links):
""" recursive function to build out the tree """
tree[cat] = dict()
tree[cat]["depth"] = level
tree[cat]["sub-categories"] = dict()
tree[cat]["links"] = list()
tree[cat]["parent-categories"] = list()
parent_cats = list()
if cat not in categories:
tries = 0
while True:
if tries > 10:
raise MediaWikiCategoryTreeError(cat)
try:
pag = self.page("{0}:{1}".format(self.category_prefix, cat))
categories[cat] = pag
parent_cats = categories[cat].categories
links[cat] = self.categorymembers(
cat, results=None, subcategories=True
)
break
except PageError:
raise PageError("{0}:{1}".format(self.category_prefix, cat))
except KeyboardInterrupt:
raise
except Exception:
tries = tries + 1
time.sleep(1)
else:
parent_cats = categories[cat].categories
tree[cat]["parent-categories"].extend(parent_cats)
tree[cat]["links"].extend(links[cat][0])
if depth and level >= depth:
for ctg in links[cat][1]:
tree[cat]["sub-categories"][ctg] = None
else:
for ctg in links[cat][1]:
__cat_tree_rec(
ctg,
depth,
tree[cat]["sub-categories"],
level + 1,
categories,
links,
)
# ###################################
# ### Actual Function Code ###
# ###################################
# make it simple to use both a list or a single category term
if not isinstance(category, list):
cats = [category]
else:
cats = category
# parameter verification
if len(cats) == 1 and (cats[0] is None or cats[0] == ""):
msg = (
"CategoryTree: Parameter 'category' must either "
"be a list of one or more categories or a string; "
"provided: '{}'".format(category)
)
raise ValueError(msg)
if depth is not None and depth < 1:
msg = (
"CategoryTree: Parameter 'depth' must be either None "
"(for the full tree) or be greater than 0"
)
raise ValueError(msg)
results = dict()
categories = dict()
links = dict()
for cat in cats:
if cat is None or cat == "":
continue
__cat_tree_rec(cat, depth, results, 0, categories, links)
return results
|
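A hedged usage sketch for the category-tree API above, using the pymediawiki client it belongs to; the category name and depth are illustrative.

from mediawiki import MediaWiki  # pymediawiki, which provides categorytree()

wiki = MediaWiki()  # defaults to the English Wikipedia API
tree = wiki.categorytree('Machine learning', depth=1)
node = tree['Machine learning']
print(node['depth'], len(node['links']), sorted(node['sub-categories'])[:3])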
def _add_session(self, session, start_info, groups_by_name):
"""Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
"""
# If the group_name is empty, this session's group contains only
# this session. Use the session name for the group name since session
# names are unique.
group_name = start_info.group_name or session.name
if group_name in groups_by_name:
groups_by_name[group_name].sessions.extend([session])
else:
# Create the group and add the session as the first one.
group = api_pb2.SessionGroup(
name=group_name,
sessions=[session],
monitor_url=start_info.monitor_url)
# Copy hparams from the first session (all sessions should have the same
# hyperparameter values) into result.
# There doesn't seem to be a way to initialize a protobuffer map in the
# constructor.
for (key, value) in six.iteritems(start_info.hparams):
group.hparams[key].CopyFrom(value)
groups_by_name[group_name] = group
|
Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
|
Below is the instruction that describes the task:
### Input:
Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
### Response:
def _add_session(self, session, start_info, groups_by_name):
"""Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A str to SessionGroup protobuffer dict. Representing the
session groups and sessions found so far.
"""
# If the group_name is empty, this session's group contains only
# this session. Use the session name for the group name since session
# names are unique.
group_name = start_info.group_name or session.name
if group_name in groups_by_name:
groups_by_name[group_name].sessions.extend([session])
else:
# Create the group and add the session as the first one.
group = api_pb2.SessionGroup(
name=group_name,
sessions=[session],
monitor_url=start_info.monitor_url)
# Copy hparams from the first session (all sessions should have the same
# hyperparameter values) into result.
# There doesn't seem to be a way to initialize a protobuffer map in the
# constructor.
for (key, value) in six.iteritems(start_info.hparams):
group.hparams[key].CopyFrom(value)
groups_by_name[group_name] = group
|
def check_syntax(self, cmd, line):
"""Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
"""
# Run syntax checks based on the type of command.
if cmd == '!':
# ! Definition
# - Must be formatted like this:
# ! type name = value
# OR
# ! type = value
match = re.match(RE.def_syntax, line)
if not match:
return "Invalid format for !Definition line: must be '! type name = value' OR '! type = value'"
elif cmd == '>':
# > Label
# - The "begin" label must have only one argument ("begin")
# - "topic" labels must be lowercased but can inherit other topics (a-z0-9_\s)
# - "object" labels must follow the same rules as "topic", but don't need to be lowercase
parts = re.split(" ", line, 2)
if parts[0] == "begin" and len(parts) > 1:
return "The 'begin' label takes no additional arguments, should be verbatim '> begin'"
elif parts[0] == "topic":
search = re.search(RE.name_syntax, line)
if search:
return "Topics should be lowercased and contain only numbers and letters"
elif parts[0] == "object":
search = re.search(RE.obj_syntax, line) # Upper case is allowed
if search:
return "Objects can only contain numbers and letters"
elif cmd == '+' or cmd == '%' or cmd == '@':
# + Trigger, % Previous, @ Redirect
# This one is strict. The triggers are to be run through the regexp engine,
# therefore it should be acceptable for the regexp engine.
# - Entirely lowercase
# - No symbols except: ( | ) [ ] * _ # @ { } < > =
# - All brackets should be matched
# - No empty option with pipe such as ||, [|, |], (|, |) and whitespace between
parens = 0 # Open parenthesis
square = 0 # Open square brackets
curly = 0 # Open curly brackets
angle = 0 # Open angled brackets
# Count brackets.
for char in line:
if char == '(':
parens += 1
elif char == ')':
parens -= 1
elif char == '[':
square += 1
elif char == ']':
square -= 1
elif char == '{':
curly += 1
elif char == '}':
curly -= 1
elif char == '<':
angle += 1
elif char == '>':
angle -= 1
elif char == '|':
if parens == 0 and square == 0: # Pipe outside the alternative and option
return "Pipe | must be within parenthesis brackets or square brackets"
if (angle != 0) and (char in {"(", ")", "[", "]", "{", "}"}):
return "Angle bracket must be closed before closing or opening other type of brackets"
total = parens + square + curly # At each character, not more than 1 bracket opens, except <>
for special_char_count in [parens, square, curly, angle, total]:
if special_char_count not in (0, 1):
return "Unbalanced brackets"
# Any mismatches?
if parens != 0:
return "Unmatched parenthesis brackets"
elif square != 0:
return "Unmatched square brackets"
elif curly != 0:
return "Unmatched curly brackets"
elif angle != 0:
return "Unmatched angle brackets"
# Check for empty pipe
search = re.search(RE.empty_pipe, line)
if search:
return "Piped arrays can't include blank entries"
# In UTF-8 mode, most symbols are allowed.
if self.utf8:
search = re.search(RE.utf8_trig, line)
if search:
return "Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode."
else:
search = re.search(RE.trig_syntax, line)
if search:
return "Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > ="
elif cmd == '-' or cmd == '^' or cmd == '/':
# - Trigger, ^ Continue, / Comment
# These commands take verbatim arguments, so their syntax is loose.
pass
elif cmd == '*':
# * Condition
# Syntax for a conditional is as follows:
# * value symbol value => response
match = re.match(RE.cond_syntax, line)
if not match:
return "Invalid format for !Condition: should be like '* value symbol value => response'"
return None
|
Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
|
Below is the instruction that describes the task:
### Input:
Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
### Response:
def check_syntax(self, cmd, line):
"""Syntax check a line of RiveScript code.
Args:
str cmd: The command symbol for the line of code, such as one
of ``+``, ``-``, ``*``, ``>``, etc.
str line: The remainder of the line of code, such as the text of
a trigger or reply.
Return:
str: A string syntax error message or ``None`` if no errors.
"""
# Run syntax checks based on the type of command.
if cmd == '!':
# ! Definition
# - Must be formatted like this:
# ! type name = value
# OR
# ! type = value
match = re.match(RE.def_syntax, line)
if not match:
return "Invalid format for !Definition line: must be '! type name = value' OR '! type = value'"
elif cmd == '>':
# > Label
# - The "begin" label must have only one argument ("begin")
# - "topic" labels must be lowercased but can inherit other topics (a-z0-9_\s)
# - "object" labels must follow the same rules as "topic", but don't need to be lowercase
parts = re.split(" ", line, 2)
if parts[0] == "begin" and len(parts) > 1:
return "The 'begin' label takes no additional arguments, should be verbatim '> begin'"
elif parts[0] == "topic":
search = re.search(RE.name_syntax, line)
if search:
return "Topics should be lowercased and contain only numbers and letters"
elif parts[0] == "object":
search = re.search(RE.obj_syntax, line) # Upper case is allowed
if search:
return "Objects can only contain numbers and letters"
elif cmd == '+' or cmd == '%' or cmd == '@':
# + Trigger, % Previous, @ Redirect
# This one is strict. The triggers are to be run through the regexp engine,
# therefore it should be acceptable for the regexp engine.
# - Entirely lowercase
# - No symbols except: ( | ) [ ] * _ # @ { } < > =
# - All brackets should be matched
# - No empty option with pipe such as ||, [|, |], (|, |) and whitespace between
parens = 0 # Open parenthesis
square = 0 # Open square brackets
curly = 0 # Open curly brackets
angle = 0 # Open angled brackets
# Count brackets.
for char in line:
if char == '(':
parens += 1
elif char == ')':
parens -= 1
elif char == '[':
square += 1
elif char == ']':
square -= 1
elif char == '{':
curly += 1
elif char == '}':
curly -= 1
elif char == '<':
angle += 1
elif char == '>':
angle -= 1
elif char == '|':
if parens == 0 and square == 0: # Pipe outside the alternative and option
return "Pipe | must be within parenthesis brackets or square brackets"
if (angle != 0) and (char in {"(", ")", "[", "]", "{", "}"}):
return "Angle bracket must be closed before closing or opening other type of brackets"
total = parens + square + curly # At each character, not more than 1 bracket opens, except <>
for special_char_count in [parens, square, curly, angle, total]:
if special_char_count not in (0, 1):
return "Unbalanced brackets"
# Any mismatches?
if parens != 0:
return "Unmatched parenthesis brackets"
elif square != 0:
return "Unmatched square brackets"
elif curly != 0:
return "Unmatched curly brackets"
elif angle != 0:
return "Unmatched angle brackets"
# Check for empty pipe
search = re.search(RE.empty_pipe, line)
if search:
return "Piped arrays can't include blank entries"
# In UTF-8 mode, most symbols are allowed.
if self.utf8:
search = re.search(RE.utf8_trig, line)
if search:
return "Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode."
else:
search = re.search(RE.trig_syntax, line)
if search:
return "Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > ="
elif cmd == '-' or cmd == '^' or cmd == '/':
# - Trigger, ^ Continue, / Comment
# These commands take verbatim arguments, so their syntax is loose.
pass
elif cmd == '*':
# * Condition
# Syntax for a conditional is as follows:
# * value symbol value => response
match = re.match(RE.cond_syntax, line)
if not match:
return "Invalid format for !Condition: should be like '* value symbol value => response'"
return None
|
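A small sketch of exercising the syntax checker above; it assumes a parser instance exposing check_syntax (as in the RiveScript Python parser) and prints whatever error string, if any, comes back.

checks = [
    ('!', 'version = 2.0'),        # well-formed definition
    ('+', 'hello [there] bot'),    # well-formed trigger
    ('+', 'hello (there bot'),     # unmatched parenthesis -> error string
    ('*', 'bad condition line'),   # malformed condition -> error string
]
for cmd, line in checks:
    error = parser.check_syntax(cmd, line)  # `parser` is an assumed, already-built instance
    print(cmd, repr(line), '->', error or 'OK')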
def cache_backend(self):
"""
Get the cache backend
Returns
~~~~~~~
Django cache backend
"""
if not hasattr(self, '_cache_backend'):
if hasattr(django.core.cache, 'caches'):
self._cache_backend = django.core.cache.caches[_cache_name]
else:
self._cache_backend = django.core.cache.get_cache(_cache_name)
return self._cache_backend
|
Get the cache backend
Returns
~~~~~~~
Django cache backend
|
Below is the instruction that describes the task:
### Input:
Get the cache backend
Returns
~~~~~~~
Django cache backend
### Response:
def cache_backend(self):
"""
Get the cache backend
Returns
~~~~~~~
Django cache backend
"""
if not hasattr(self, '_cache_backend'):
if hasattr(django.core.cache, 'caches'):
self._cache_backend = django.core.cache.caches[_cache_name]
else:
self._cache_backend = django.core.cache.get_cache(_cache_name)
return self._cache_backend
|
def download(name, options):
"""
download a file or all files in a directory
"""
dire = os.path.dirname(name) # returns the directory name
fName = os.path.basename(name) # returns the filename
fNameOnly, fExt = os.path.splitext(fName)
dwn = 0
if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded
if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose):
dwn += 1
elif dirExists(name):
for filename in os.listdir(name):
if options.recursive:
dwn += download(os.path.join(name, filename), options)
else:
if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose):
dwn += 1
return dwn
|
download a file or all files in a directory
|
Below is the instruction that describes the task:
### Input:
download a file or all files in a directory
### Response:
def download(name, options):
"""
download a file or all files in a directory
"""
dire = os.path.dirname(name) # returns the directory name
fName = os.path.basename(name) # returns the filename
fNameOnly, fExt = os.path.splitext(fName)
dwn = 0
if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded
if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose):
dwn += 1
elif dirExists(name):
for filename in os.listdir(name):
if options.recursive:
dwn += download(os.path.join(name, filename), options)
else:
if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose):
dwn += 1
return dwn
|
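A hedged sketch of driving the downloader above from a script; the options object is faked with SimpleNamespace, and the attribute names (timeout, verbose, recursive) are inferred from how the function uses them rather than from the original argument parser.

from types import SimpleNamespace

# Stand-in for the parsed command-line options the function expects.
options = SimpleNamespace(timeout=10, verbose=True, recursive=True)

# Download subtitles for one video file, then for a whole directory tree.
count = download('/movies/example.mkv', options)
count += download('/movies/season1', options)
print('%d subtitle(s) downloaded' % count)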
def references(self, key, value):
"""Populate the ``references`` key."""
def _has_curator_flag(value):
normalized_nine_values = [el.upper() for el in force_list(value.get('9'))]
return 'CURATOR' in normalized_nine_values
def _is_curated(value):
return force_single_element(value.get('z')) == '1' and _has_curator_flag(value)
def _set_record(el):
recid = maybe_int(el)
record = get_record_ref(recid, 'literature')
rb.set_record(record)
rb = ReferenceBuilder()
mapping = [
('0', _set_record),
('a', rb.add_uid),
('b', rb.add_uid),
('c', rb.add_collaboration),
('e', partial(rb.add_author, role='ed.')),
('h', rb.add_refextract_authors_str),
('i', rb.add_uid),
('k', rb.set_texkey),
('m', rb.add_misc),
('o', rb.set_label),
('p', rb.set_publisher),
('q', rb.add_parent_title),
('r', rb.add_report_number),
('s', rb.set_pubnote),
('t', rb.add_title),
('x', rb.add_raw_reference),
('y', rb.set_year),
]
for field, method in mapping:
for el in force_list(value.get(field)):
if el:
method(el)
for el in dedupe_list(force_list(value.get('u'))):
if el:
rb.add_url(el)
if _is_curated(value):
rb.curate()
if _has_curator_flag(value):
rb.obj['legacy_curated'] = True
return rb.obj
|
Populate the ``references`` key.
|
Below is the instruction that describes the task:
### Input:
Populate the ``references`` key.
### Response:
def references(self, key, value):
"""Populate the ``references`` key."""
def _has_curator_flag(value):
normalized_nine_values = [el.upper() for el in force_list(value.get('9'))]
return 'CURATOR' in normalized_nine_values
def _is_curated(value):
return force_single_element(value.get('z')) == '1' and _has_curator_flag(value)
def _set_record(el):
recid = maybe_int(el)
record = get_record_ref(recid, 'literature')
rb.set_record(record)
rb = ReferenceBuilder()
mapping = [
('0', _set_record),
('a', rb.add_uid),
('b', rb.add_uid),
('c', rb.add_collaboration),
('e', partial(rb.add_author, role='ed.')),
('h', rb.add_refextract_authors_str),
('i', rb.add_uid),
('k', rb.set_texkey),
('m', rb.add_misc),
('o', rb.set_label),
('p', rb.set_publisher),
('q', rb.add_parent_title),
('r', rb.add_report_number),
('s', rb.set_pubnote),
('t', rb.add_title),
('x', rb.add_raw_reference),
('y', rb.set_year),
]
for field, method in mapping:
for el in force_list(value.get(field)):
if el:
method(el)
for el in dedupe_list(force_list(value.get('u'))):
if el:
rb.add_url(el)
if _is_curated(value):
rb.curate()
if _has_curator_flag(value):
rb.obj['legacy_curated'] = True
return rb.obj
|
def check_tcp(helper, host, port, warning_param, critical_param, session):
"""
the check logic for check TCP ports
"""
# from tcpConnState from TCP-MIB
tcp_translate = {
"1" : "closed",
"2" : "listen",
"3" : "synSent",
"4" : "synReceived",
"5" : "established",
"6" : "finWait1",
"7" : "finWait2",
"8" : "closeWait",
"9" : "lastAck",
"10": "closing",
"11": "timeWait",
"12": "deleteTCB"
}
# collect all open local ports
open_ports = walk_data(session, '.1.3.6.1.2.1.6.13.1.3', helper)[0] #tcpConnLocalPort from TCP-MIB (deprecated)
# collect all status information about the open ports
port_status = walk_data(session, '.1.3.6.1.2.1.6.13.1.1', helper)[0] #tcpConnState from TCP-MIB (deprecated)
# make a dict out of the two lists
port_and_status = dict(zip(open_ports, port_status))
# here we show all open TCP ports and its status
if scan:
print("All open TCP ports: " + host)
for port in open_ports:
tcp_status = port_and_status[port]
tcp_status = tcp_translate[tcp_status]
print("TCP: \t" + port + "\t Status: \t" + tcp_status)
quit()
#here we have the real check logic for TCP ports
if port in open_ports:
# if the port is available in the list of open_ports, then extract the status
tcp_status = port_and_status[port]
# translate the status from the integer value to a human readable string
tcp_status = tcp_translate[tcp_status]
# now let's set the status according to the warning / critical "threshold" parameter
if tcp_status in warning_param:
helper.status(warning)
elif tcp_status in critical_param:
helper.status(critical)
else:
helper.status(ok)
else:
# if there is no value in the list => the port is closed for sure
tcp_status = "CLOSED"
helper.status(critical)
return ("Current status for TCP port " + port + " is: " + tcp_status)
|
the check logic for check TCP ports
|
Below is the instruction that describes the task:
### Input:
the check logic for check TCP ports
### Response:
def check_tcp(helper, host, port, warning_param, critical_param, session):
"""
the check logic for check TCP ports
"""
# from tcpConnState from TCP-MIB
tcp_translate = {
"1" : "closed",
"2" : "listen",
"3" : "synSent",
"4" : "synReceived",
"5" : "established",
"6" : "finWait1",
"7" : "finWait2",
"8" : "closeWait",
"9" : "lastAck",
"10": "closing",
"11": "timeWait",
"12": "deleteTCB"
}
# collect all open local ports
open_ports = walk_data(session, '.1.3.6.1.2.1.6.13.1.3', helper)[0] #tcpConnLocalPort from TCP-MIB (deprecated)
# collect all status information about the open ports
port_status = walk_data(session, '.1.3.6.1.2.1.6.13.1.1', helper)[0] #tcpConnState from TCP-MIB (deprecated)
# make a dict out of the two lists
port_and_status = dict(zip(open_ports, port_status))
# here we show all open TCP ports and its status
if scan:
print("All open TCP ports: " + host)
for port in open_ports:
tcp_status = port_and_status[port]
tcp_status = tcp_translate[tcp_status]
print("TCP: \t" + port + "\t Status: \t" + tcp_status)
quit()
#here we have the real check logic for TCP ports
if port in open_ports:
# if the port is available in the list of open_ports, then extract the status
tcp_status = port_and_status[port]
# translate the status from the integer value to a human readable string
tcp_status = tcp_translate[tcp_status]
# now let's set the status according to the warning / critical "threshold" parameter
if tcp_status in warning_param:
helper.status(warning)
elif tcp_status in critical_param:
helper.status(critical)
else:
helper.status(ok)
else:
# if there is no value in the list => the port is closed for sure
tcp_status = "CLOSED"
helper.status(critical)
return ("Current status for TCP port " + port + " is: " + tcp_status)
|
def pass_actualremoterelease_v1(self):
"""Update the outlet link sequence |dam_outlets.S|."""
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.s[0] += flu.actualremoterelease
|
Update the outlet link sequence |dam_outlets.S|.
|
Below is the instruction that describes the task:
### Input:
Update the outlet link sequence |dam_outlets.S|.
### Response:
def pass_actualremoterelease_v1(self):
"""Update the outlet link sequence |dam_outlets.S|."""
flu = self.sequences.fluxes.fastaccess
out = self.sequences.outlets.fastaccess
out.s[0] += flu.actualremoterelease
|
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
|
Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
|
Below is the instruction that describes the task:
### Input:
Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
### Response:
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
|
def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))])
|
expands contextual variables v, by repeating each instance as specified in Nt
|
Below is the instruction that describes the task:
### Input:
expands contextual variables v, by repeating each instance as specified in Nt
### Response:
def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))])
|
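A tiny worked example of the expansion above: each contextual value v[i] is repeated Nt[i] times along the first axis.

import numpy as np

v = np.array([0.5, 1.5, 2.5])   # one contextual value per segment
Nt = np.array([2, 1, 3])        # samples per segment
out = expand_variables_to_segments(v, Nt)
print(out.shape)     # (6, 1)
print(out.ravel())   # [0.5 0.5 1.5 2.5 2.5 2.5]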
def rpcexec(self, payload):
""" Manual execute a command on API (internally used)
param str payload: The payload containing the request
return: Servers answer to the query
rtype: json
raises RPCConnection: if no connction can be made
raises UnauthorizedError: if the user is not authorized
raise ValueError: if the API returns a non-JSON formated answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
"""
try:
response = requests.post(
"http://{}:{}/rpc".format(self.host, self.port),
data=json.dumps(payload, ensure_ascii=False).encode("utf8"),
headers=self.headers,
auth=(self.username, self.password),
)
if response.status_code == 401:
raise UnauthorizedError
ret = json.loads(response.text)
if "error" in ret:
if "detail" in ret["error"]:
raise RPCError(ret["error"]["detail"])
else:
raise RPCError(ret["error"]["message"])
except requests.exceptions.RequestException:
raise RPCConnection("Error connecting to Client!")
except UnauthorizedError:
raise UnauthorizedError("Invalid login credentials!")
except ValueError:
raise ValueError("Client returned invalid format. Expected JSON!")
except RPCError as err:
raise err
else:
return ret["result"]
|
Manually execute a command on the API (internally used)
param str payload: The payload containing the request
return: Server's answer to the query
rtype: json
raises RPCConnection: if no connection can be made
raises UnauthorizedError: if the user is not authorized
raises ValueError: if the API returns a non-JSON formatted answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
|
Below is the instruction that describes the task:
### Input:
Manually execute a command on the API (internally used)
param str payload: The payload containing the request
return: Server's answer to the query
rtype: json
raises RPCConnection: if no connection can be made
raises UnauthorizedError: if the user is not authorized
raises ValueError: if the API returns a non-JSON formatted answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
### Response:
def rpcexec(self, payload):
""" Manual execute a command on API (internally used)
param str payload: The payload containing the request
return: Servers answer to the query
rtype: json
raises RPCConnection: if no connction can be made
raises UnauthorizedError: if the user is not authorized
raise ValueError: if the API returns a non-JSON formated answer
It is not recommended to use this method directly, unless
you know what you are doing. All calls available to the API
will be wrapped to methods directly::
info -> grapheneapi.info()
"""
try:
response = requests.post(
"http://{}:{}/rpc".format(self.host, self.port),
data=json.dumps(payload, ensure_ascii=False).encode("utf8"),
headers=self.headers,
auth=(self.username, self.password),
)
if response.status_code == 401:
raise UnauthorizedError
ret = json.loads(response.text)
if "error" in ret:
if "detail" in ret["error"]:
raise RPCError(ret["error"]["detail"])
else:
raise RPCError(ret["error"]["message"])
except requests.exceptions.RequestException:
raise RPCConnection("Error connecting to Client!")
except UnauthorizedError:
raise UnauthorizedError("Invalid login credentials!")
except ValueError:
raise ValueError("Client returned invalid format. Expected JSON!")
except RPCError as err:
raise err
else:
return ret["result"]
|
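A hedged sketch of calling the JSON-RPC executor above; the payload follows the usual JSON-RPC 2.0 shape and the method/params are placeholders for whatever the node actually exposes.

# `api` is assumed to be an instance of the class above, built with host, port,
# username and password.
payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "call",
    "params": ["database_api", "get_dynamic_global_properties", []],
}
try:
    result = api.rpcexec(payload)
    print(result)
except UnauthorizedError:
    print("Check the RPC username/password")
except RPCConnection as err:
    print("Node unreachable:", err)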
def email_message(
self,
recipient, # type: Text
subject_template, # type: Text
body_template, # type: Text
sender=None, # type: Optional[AbstractUser]
message_class=EmailMessage,
**kwargs
):
"""
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
"""
from_email = "%s %s <%s>" % (
sender.first_name,
sender.last_name,
email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1],
)
reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name, sender.email)
headers = {"Reply-To": reply_to}
kwargs.update({"sender": sender, "recipient": recipient})
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(
kwargs
).strip() # Remove stray newline characters
body = body_template.render(kwargs)
return message_class(subject, body, from_email, [recipient], headers=headers)
|
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
|
Below is the instruction that describes the task:
### Input:
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
### Response:
def email_message(
self,
recipient, # type: Text
subject_template, # type: Text
body_template, # type: Text
sender=None, # type: Optional[AbstractUser]
message_class=EmailMessage,
**kwargs
):
"""
Returns an invitation email message. This can be easily overridden.
For instance, to send an HTML message, use the EmailMultiAlternatives message_class
and attach the additional content.
"""
from_email = "%s %s <%s>" % (
sender.first_name,
sender.last_name,
email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1],
)
reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name, sender.email)
headers = {"Reply-To": reply_to}
kwargs.update({"sender": sender, "recipient": recipient})
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(
kwargs
).strip() # Remove stray newline characters
body = body_template.render(kwargs)
return message_class(subject, body, from_email, [recipient], headers=headers)
|
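A hedged usage sketch for the invitation e-mail builder above, inside a Django view; the recipient, the template paths and the `invitation` object itself are illustrative assumptions.

# Inside a Django view, with request.user as the inviting user.
message = invitation.email_message(
    recipient="new.member@example.com",
    subject_template="invitations/invitation_subject.txt",  # assumed template path
    body_template="invitations/invitation_body.txt",        # assumed template path
    sender=request.user,
)
message.send()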
def shutdown(self, payload=None):
"""
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
"""
logging.info("Work queue shutdown.")
self.connection.close()
self.receiving_messages = False
|
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
|
Below is the instruction that describes the task:
### Input:
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
### Response:
def shutdown(self, payload=None):
"""
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
"""
logging.info("Work queue shutdown.")
self.connection.close()
self.receiving_messages = False
|
def tacacs_server_tacacs_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = kwargs.pop('tacacs_source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def tacacs_server_tacacs_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = kwargs.pop('tacacs_source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def print_help(self, *args, **kwargs):
""" Add pager support to help output. """
if self._command is not None and self._command.session.allow_pager:
desc = 'Help\: %s' % '-'.join(self.prog.split())
pager_kwargs = self._command.get_pager_spec()
with paging.pager_redirect(desc, **pager_kwargs):
return super().print_help(*args, **kwargs)
else:
return super().print_help(*args, **kwargs)
|
Add pager support to help output.
|
Below is the instruction that describes the task:
### Input:
Add pager support to help output.
### Response:
def print_help(self, *args, **kwargs):
""" Add pager support to help output. """
if self._command is not None and self._command.session.allow_pager:
desc = 'Help\: %s' % '-'.join(self.prog.split())
pager_kwargs = self._command.get_pager_spec()
with paging.pager_redirect(desc, **pager_kwargs):
return super().print_help(*args, **kwargs)
else:
return super().print_help(*args, **kwargs)
|
def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist
|
Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
|
Below is the instruction that describes the task:
### Input:
Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
### Response:
def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist
|
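Continuing the doctest above, a short check that every element of the returned list really carries the common upcast type, including a plain scalar; the import path matches the one the docstring itself uses.

import numpy as np
from scipy.sparse.sputils import upcast

x = np.ones((5, 1))          # float64
y = 2.0j * np.ones((5, 1))   # complex128
z = 3                        # plain int scalar
x, y, z = to_type(upcast(x.dtype, y.dtype), [x, y, z])
print(x.dtype, y.dtype, type(z))  # complex128 complex128 <class 'numpy.complex128'>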
def _turn_sigterm_into_systemexit(): # pragma: no cover
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
|
Attempts to turn a SIGTERM exception into a SystemExit exception.
|
Below is the instruction that describes the task:
### Input:
Attempts to turn a SIGTERM exception into a SystemExit exception.
### Response:
def _turn_sigterm_into_systemexit(): # pragma: no cover
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
|
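A small sketch of why converting SIGTERM into SystemExit is useful: the finally block below runs when the process is terminated with a plain kill, instead of the default abrupt exit.

import time

_turn_sigterm_into_systemexit()

try:
    while True:       # stand-in for a server loop
        time.sleep(1)
finally:
    # Reached on SIGTERM (now a SystemExit) as well as on Ctrl-C.
    print("cleanup: close sockets, flush logs, ...")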
def generate_model_file(filename, project, model, fields):
"""Creates a webpage for a given instance of a model."""
for field in fields:
field.type = field.__class__.__name__
content = open(os.path.join(os.path.dirname(__file__), 'templates/model_page.html'), 'r').read()
engine = StatikTemplateEngine(project)
template = engine.create_template(content)
# create context and update from project.config
context = {'model': model,
'fields': fields}
context.update(dict(project.config.context_static))
string = template.render(context)
with open(filename, 'w') as file:
file.write(string)
|
Creates a webpage for a given instance of a model.
|
Below is the instruction that describes the task:
### Input:
Creates a webpage for a given instance of a model.
### Response:
def generate_model_file(filename, project, model, fields):
"""Creates a webpage for a given instance of a model."""
for field in fields:
field.type = field.__class__.__name__
content = open(os.path.join(os.path.dirname(__file__), 'templates/model_page.html'), 'r').read()
engine = StatikTemplateEngine(project)
template = engine.create_template(content)
# create context and update from project.config
context = {'model': model,
'fields': fields}
context.update(dict(project.config.context_static))
string = template.render(context)
with open(filename, 'w') as file:
file.write(string)
|
def _update_index(self, axis, key, value):
"""Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
"""
# delete current value if given None
if value is None:
return delattr(self, key)
_key = "_{}".format(key)
index = "{[0]}index".format(axis)
unit = "{[0]}unit".format(axis)
# convert float to Quantity
if not isinstance(value, Quantity):
try:
value = Quantity(value, getattr(self, unit))
except TypeError:
value = Quantity(float(value), getattr(self, unit))
# if value is changing, delete current index
try:
curr = getattr(self, _key)
except AttributeError:
delattr(self, index)
else:
if (
value is None or
getattr(self, key) is None or
not value.unit.is_equivalent(curr.unit) or
value != curr
):
delattr(self, index)
# set new value
setattr(self, _key, value)
return value
|
Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
|
Below is the instruction that describes the task:
### Input:
Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
### Response:
def _update_index(self, axis, key, value):
"""Update the current axis index based on a given key or value
This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate
Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
To actually set an index array, use `_set_index`
"""
# delete current value if given None
if value is None:
return delattr(self, key)
_key = "_{}".format(key)
index = "{[0]}index".format(axis)
unit = "{[0]}unit".format(axis)
# convert float to Quantity
if not isinstance(value, Quantity):
try:
value = Quantity(value, getattr(self, unit))
except TypeError:
value = Quantity(float(value), getattr(self, unit))
# if value is changing, delete current index
try:
curr = getattr(self, _key)
except AttributeError:
delattr(self, index)
else:
if (
value is None or
getattr(self, key) is None or
not value.unit.is_equivalent(curr.unit) or
value != curr
):
delattr(self, index)
# set new value
setattr(self, _key, value)
return value
|
def _deserialization_helper(self, state, ray_forking):
"""This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
"""
worker = ray.worker.get_global_worker()
worker.check_connected()
if state["ray_forking"]:
actor_handle_id = state["actor_handle_id"]
else:
# Right now, if the actor handle has been pickled, we create a
# temporary actor handle id for invocations.
# TODO(pcm): This still leads to a lot of actor handles being
# created, there should be a better way to handle pickled
# actor handles.
# TODO(swang): Accessing the worker's current task ID is not
# thread-safe.
# TODO(swang): Unpickling the same actor handle twice in the same
# task will break the application, and unpickling it twice in the
# same actor is likely a performance bug. We should consider
# logging a warning in these cases.
actor_handle_id = compute_actor_handle_id_non_forked(
state["actor_handle_id"], worker.current_task_id)
self.__init__(
state["actor_id"],
state["module_name"],
state["class_name"],
state["actor_cursor"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
state["actor_creation_dummy_object_id"],
state["actor_method_cpus"],
# This is the driver ID of the driver that owns the actor, not
# necessarily the driver that owns this actor handle.
state["actor_driver_id"],
actor_handle_id=actor_handle_id)
|
This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
|
Below is the instruction that describes the task:
### Input:
This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
### Response:
def _deserialization_helper(self, state, ray_forking):
"""This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
"""
worker = ray.worker.get_global_worker()
worker.check_connected()
if state["ray_forking"]:
actor_handle_id = state["actor_handle_id"]
else:
# Right now, if the actor handle has been pickled, we create a
# temporary actor handle id for invocations.
# TODO(pcm): This still leads to a lot of actor handles being
# created, there should be a better way to handle pickled
# actor handles.
# TODO(swang): Accessing the worker's current task ID is not
# thread-safe.
# TODO(swang): Unpickling the same actor handle twice in the same
# task will break the application, and unpickling it twice in the
# same actor is likely a performance bug. We should consider
# logging a warning in these cases.
actor_handle_id = compute_actor_handle_id_non_forked(
state["actor_handle_id"], worker.current_task_id)
self.__init__(
state["actor_id"],
state["module_name"],
state["class_name"],
state["actor_cursor"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
state["actor_creation_dummy_object_id"],
state["actor_method_cpus"],
# This is the driver ID of the driver that owns the actor, not
# necessarily the driver that owns this actor handle.
state["actor_driver_id"],
actor_handle_id=actor_handle_id)
|
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
|
Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
|
Below is the instruction that describes the task:
### Input:
Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
### Response:
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
|
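A hedged sketch of feeding binary key material to the helper above; the key file path and key name are illustrative, and the material must already be de-armored (binary), as the docstring notes.

# Read already de-armored (binary) GPG key material and install it for APT.
with open('/tmp/example-archive-keyring.gpg', 'rb') as f:  # illustrative path
    key_material = f.read()

_write_apt_gpg_keyfile('6B05F25D762E3157', key_material)
# -> writes /etc/apt/trusted.gpg.d/6B05F25D762E3157.gpg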
def get_snpeff_info(snpeff_string, snpeff_header):
"""Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
"""
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations
|
Make the vep annotations into dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
|
Below is the instruction that describes the task:
### Input:
Make the vep annotations into dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
### Response:
def get_snpeff_info(snpeff_string, snpeff_header):
"""Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts
"""
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations
|
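A small worked example of the pipe-splitting above, using a shortened ANN header; real snpEff ANN strings carry many more fields, but the zip/split logic is the same.

snpeff_header = ['Allele', 'Annotation', 'Impact', 'Gene_Name']
snpeff_string = 'A|missense_variant|MODERATE|BRCA1,A|intron_variant|MODIFIER|BRCA1'

for annotation in get_snpeff_info(snpeff_string, snpeff_header):
    print(annotation['Gene_Name'], annotation['Annotation'])
# BRCA1 missense_variant
# BRCA1 intron_variant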
def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
"""
Get a datetime in the local timezone from date and optionally time
"""
localTZ = timezone.get_current_timezone()
if tz is None or tz == localTZ:
localDt = getAwareDatetime(date, time, tz, timeDefault)
else:
# create in event's time zone
eventDt = getAwareDatetime(date, time, tz, timeDefault)
# convert to local time zone
localDt = eventDt.astimezone(localTZ)
if time is None:
localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
return localDt
|
Get a datetime in the local timezone from date and optionally time
|
Below is the instruction that describes the task:
### Input:
Get a datetime in the local timezone from date and optionally time
### Response:
def getLocalDatetime(date, time, tz=None, timeDefault=dt.time.max):
"""
Get a datetime in the local timezone from date and optionally time
"""
localTZ = timezone.get_current_timezone()
if tz is None or tz == localTZ:
localDt = getAwareDatetime(date, time, tz, timeDefault)
else:
# create in event's time zone
eventDt = getAwareDatetime(date, time, tz, timeDefault)
# convert to local time zone
localDt = eventDt.astimezone(localTZ)
if time is None:
localDt = getAwareDatetime(localDt.date(), None, localTZ, timeDefault)
return localDt
|
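A hedged sketch of the timezone conversion above; it assumes a Django setup where the current timezone is Europe/London, and uses pytz only for illustration.

import datetime as dt
import pytz

ny = pytz.timezone('America/New_York')

# An event defined in New York at 18:00 becomes local (London) wall-clock time.
local_dt = getLocalDatetime(dt.date(2023, 7, 1), dt.time(18, 0), tz=ny)
print(local_dt)  # 2023-07-01 23:00:00+01:00, assuming Europe/London is current

# With no time given, the helper falls back to timeDefault (end of the local day).
print(getLocalDatetime(dt.date(2023, 7, 1), None, tz=ny))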
def generate_doc(self, language_predicate, create_jvmdoc_command):
"""
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation for targets
"""
catalog = self.context.products.isrequired(self.jvmdoc().product_type)
if catalog and self.combined:
raise TaskError(
'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))
def docable(target):
if not language_predicate(target):
self.context.log.debug('Skipping [{}] because it does not pass the language predicate'.format(target.address.spec))
return False
if not self._include_codegen and target.is_synthetic:
self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
return False
for pattern in self._exclude_patterns:
if pattern.search(target.address.spec):
self.context.log.debug(
"Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
return False
return True
targets = self.get_targets(predicate=docable)
if not targets:
return
with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check:
def find_invalid_targets():
invalid_targets = set()
for vt in invalidation_check.invalid_vts:
invalid_targets.update(vt.targets)
return invalid_targets
invalid_targets = list(find_invalid_targets())
if invalid_targets:
if self.combined:
self._generate_combined(targets, create_jvmdoc_command)
else:
self._generate_individual(invalid_targets, create_jvmdoc_command)
if self.open and self.combined:
try:
desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html'))
except desktop.OpenError as e:
raise TaskError(e)
if catalog:
for target in targets:
gendir = self._gendir(target)
jvmdocs = []
for root, dirs, files in safe_walk(gendir):
jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
|
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation for targets
|
Below is the instruction that describes the task:
### Input:
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation for targets
### Response:
def generate_doc(self, language_predicate, create_jvmdoc_command):
"""
Generate an execute method given a language predicate and command to create documentation
language_predicate: a function that accepts a target and returns True if the target is of that
language
create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
documentation for targets
"""
catalog = self.context.products.isrequired(self.jvmdoc().product_type)
if catalog and self.combined:
raise TaskError(
'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))
def docable(target):
if not language_predicate(target):
self.context.log.debug('Skipping [{}] because it does not pass the language predicate'.format(target.address.spec))
return False
if not self._include_codegen and target.is_synthetic:
self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
return False
for pattern in self._exclude_patterns:
if pattern.search(target.address.spec):
self.context.log.debug(
"Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
return False
return True
targets = self.get_targets(predicate=docable)
if not targets:
return
with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check:
def find_invalid_targets():
invalid_targets = set()
for vt in invalidation_check.invalid_vts:
invalid_targets.update(vt.targets)
return invalid_targets
invalid_targets = list(find_invalid_targets())
if invalid_targets:
if self.combined:
self._generate_combined(targets, create_jvmdoc_command)
else:
self._generate_individual(invalid_targets, create_jvmdoc_command)
if self.open and self.combined:
try:
desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html'))
except desktop.OpenError as e:
raise TaskError(e)
if catalog:
for target in targets:
gendir = self._gendir(target)
jvmdocs = []
for root, dirs, files in safe_walk(gendir):
jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
|
def filter(self, func):
"""Returns a packet list filtered by a truth function"""
return self.__class__(list(filter(func,self.res)),
name="filtered %s"%self.listname)
|
Returns a packet list filtered by a truth function
|
Below is the instruction that describes the task:
### Input:
Returns a packet list filtered by a truth function
### Response:
def filter(self, func):
"""Returns a packet list filtered by a truth function"""
return self.__class__(list(filter(func,self.res)),
name="filtered %s"%self.listname)
|
def save_dot(self, fd):
""" Saves a representation of the case in the Graphviz DOT language.
"""
from pylon.io import DotWriter
DotWriter(self).write(fd)
|
Saves a representation of the case in the Graphviz DOT language.
|
Below is the instruction that describes the task:
### Input:
Saves a representation of the case in the Graphviz DOT language.
### Response:
def save_dot(self, fd):
""" Saves a representation of the case in the Graphviz DOT language.
"""
from pylon.io import DotWriter
DotWriter(self).write(fd)
|
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
|
Return XML end tag for the receiver.
|
Below is the instruction that describes the task:
### Input:
Return XML end tag for the receiver.
### Response:
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
|
def ext_pillar(minion_id,
pillar,
*args,
**kwargs):
'''
Execute queries against SQLite3, merge and return as a dict
'''
return SQLite3ExtPillar().fetch(minion_id, pillar, *args, **kwargs)
|
Execute queries against SQLite3, merge and return as a dict
|
Below is the instruction that describes the task:
### Input:
Execute queries against SQLite3, merge and return as a dict
### Response:
def ext_pillar(minion_id,
pillar,
*args,
**kwargs):
'''
Execute queries against SQLite3, merge and return as a dict
'''
return SQLite3ExtPillar().fetch(minion_id, pillar, *args, **kwargs)
|
def change_email(self, email):
"""
Change user's login email
:param user: AuthUser
:param email:
:return:
"""
def cb():
if not utils.is_email_valid(email):
raise exceptions.AuthError("Email address invalid")
self.user.change_email(email)
return email
return signals.user_update(self, ACTIONS["EMAIL"], cb,
{"email": self.email})
|
Change user's login email
:param user: AuthUser
:param email:
:return:
|
Below is the instruction that describes the task:
### Input:
Change user's login email
:param user: AuthUser
:param email:
:return:
### Response:
def change_email(self, email):
"""
Change user's login email
:param user: AuthUser
:param email:
:return:
"""
def cb():
if not utils.is_email_valid(email):
raise exceptions.AuthError("Email address invalid")
self.user.change_email(email)
return email
return signals.user_update(self, ACTIONS["EMAIL"], cb,
{"email": self.email})
|
def _validate(self):
"""Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
"""
# A JP2 file must contain certain boxes. The 2nd box must be a file
# type box.
if not isinstance(self.box[1], FileTypeBox):
msg = "{filename} does not contain a valid File Type box."
msg = msg.format(filename=self.filename)
raise IOError(msg)
# A jp2-branded file cannot use the "any ICC" profile method.
ftyp = self.box[1]
if ftyp.brand == 'jp2 ':
jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
colrs = [box for box in jp2h.box if box.box_id == 'colr']
for colr in colrs:
if colr.method not in (core.ENUMERATED_COLORSPACE,
core.RESTRICTED_ICC_PROFILE):
msg = ("Color Specification box method must specify "
"either an enumerated colorspace or a restricted "
"ICC profile if the file type box brand is 'jp2 '.")
warnings.warn(msg, UserWarning)
|
Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
|
Below is the instruction that describes the task:
### Input:
Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
### Response:
def _validate(self):
"""Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
"""
# A JP2 file must contain certain boxes. The 2nd box must be a file
# type box.
if not isinstance(self.box[1], FileTypeBox):
msg = "{filename} does not contain a valid File Type box."
msg = msg.format(filename=self.filename)
raise IOError(msg)
# A jp2-branded file cannot use the "any ICC" profile method.
ftyp = self.box[1]
if ftyp.brand == 'jp2 ':
jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
colrs = [box for box in jp2h.box if box.box_id == 'colr']
for colr in colrs:
if colr.method not in (core.ENUMERATED_COLORSPACE,
core.RESTRICTED_ICC_PROFILE):
msg = ("Color Specification box method must specify "
"either an enumerated colorspace or a restricted "
"ICC profile if the file type box brand is 'jp2 '.")
warnings.warn(msg, UserWarning)
|
def find_elements(self):
"""
Returns:
Element (list): all the elements
"""
es = []
for element_id in self.find_element_ids():
e = Element(self.http.new_client(''), element_id)
es.append(e)
return es
|
Returns:
Element (list): all the elements
|
Below is the instruction that describes the task:
### Input:
Returns:
Element (list): all the elements
### Response:
def find_elements(self):
"""
Returns:
Element (list): all the elements
"""
es = []
for element_id in self.find_element_ids():
e = Element(self.http.new_client(''), element_id)
es.append(e)
return es
|
def save(self, **kwargs):
"Override save() to construct tree_path based on the category's parent."
old_tree_path = self.tree_path
if self.tree_parent:
if self.tree_parent.tree_path:
self.tree_path = '%s/%s' % (self.tree_parent.tree_path, self.slug)
else:
self.tree_path = self.slug
else:
self.tree_path = ''
Category.objects.clear_cache()
super(Category, self).save(**kwargs)
if old_tree_path != self.tree_path:
# the tree_path has changed, update children
children = Category.objects.filter(tree_parent=self)
for child in children:
child.save(force_update=True)
|
Override save() to construct tree_path based on the category's parent.
|
Below is the instruction that describes the task:
### Input:
Override save() to construct tree_path based on the category's parent.
### Response:
def save(self, **kwargs):
"Override save() to construct tree_path based on the category's parent."
old_tree_path = self.tree_path
if self.tree_parent:
if self.tree_parent.tree_path:
self.tree_path = '%s/%s' % (self.tree_parent.tree_path, self.slug)
else:
self.tree_path = self.slug
else:
self.tree_path = ''
Category.objects.clear_cache()
super(Category, self).save(**kwargs)
if old_tree_path != self.tree_path:
# the tree_path has changed, update children
children = Category.objects.filter(tree_parent=self)
for child in children:
child.save(force_update=True)
|
def get_height(self, points, only_in = True, edge=True, full=False):
"""
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the input points have a third column, the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
"""
p = self.get_parametric()
z = (-p[0]*points[:, 0]-p[1]*points[:, 1]-p[3])/p[2]
if only_in:
pip = self.contains(points, edge=edge)
z[pip == False] *= np.nan
if full:
z = np.hstack((points[:, :2],
np.reshape(z, (points.shape[0], 1))))
if points.shape[1] == 3: # Restore original z
z[pip == False] = points[pip == False]
return z
|
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the input points have a third column, the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
|
Below is the instruction that describes the task:
### Input:
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the input points have a third column, the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
### Response:
def get_height(self, points, only_in = True, edge=True, full=False):
"""
Given a set of points, it computes the z value for the
parametric equation of the plane where the polygon belongs.
Only the two first columns of the points will be taken into
account as x and y.
By default, the points outside the object will have a NaN value
in the z column. If the input points have a third column, the z
values outside the Surface's domain will remain unchanged, the
rest will be replaced.
:param points: Coordinates of the points to calculate.
:type points: ndarray shape=(N, 2 or 3)
:param only_in: If True, computes only the points which are
inside of the Polygon.
:type only_in: bool
:param edge: If True, consider the points in the Polygon's edge
inside the Polygon.
:type edge: bool
:param full: If true, the return will have three columns
(x, y, z) instead of one (z).
:type full: bool
:returns: (z) or (x, y, z)
:rtype: ndarray shape=(N, 1 or 3)
"""
p = self.get_parametric()
z = (-p[0]*points[:, 0]-p[1]*points[:, 1]-p[3])/p[2]
if only_in:
pip = self.contains(points, edge=edge)
z[pip == False] *= np.nan
if full:
z = np.hstack((points[:, :2],
np.reshape(z, (points.shape[0], 1))))
if points.shape[1] == 3: # Restore original z
z[pip == False] = points[pip == False]
return z
|
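A worked numeric example of the plane evaluation inside get_height above, using assumed parametric coefficients (a, b, c, d) for the plane x + y + z - 1 = 0:
import numpy as np

a, b, c, d = 1.0, 1.0, 1.0, -1.0                 # a*x + b*y + c*z + d = 0
points = np.array([[0.0, 0.0], [0.5, 0.25]])
z = (-a * points[:, 0] - b * points[:, 1] - d) / c
print(z)   # [1.   0.25]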
def _association_types(self):
"""Retrieve Custom Indicator Associations types from the ThreatConnect API."""
# Dynamically create custom indicator class
r = self.session.get('/v2/types/associationTypes')
# check for bad status code and response that is not JSON
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
warn = u'Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
# validate successful API results
data = r.json()
if data.get('status') != 'Success':
warn = u'Bad Status: Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
try:
# Association Type Name is not a unique value at this time, but should be.
for association in data.get('data', {}).get('associationType', []):
self._indicator_associations_types_data[association.get('name')] = association
except Exception as e:
self.handle_error(200, [e])
|
Retrieve Custom Indicator Associations types from the ThreatConnect API.
|
Below is the instruction that describes the task:
### Input:
Retrieve Custom Indicator Associations types from the ThreatConnect API.
### Response:
def _association_types(self):
"""Retrieve Custom Indicator Associations types from the ThreatConnect API."""
# Dynamically create custom indicator class
r = self.session.get('/v2/types/associationTypes')
# check for bad status code and response that is not JSON
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
warn = u'Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
# validate successful API results
data = r.json()
if data.get('status') != 'Success':
warn = u'Bad Status: Custom Indicators Associations are not supported.'
self.log.warning(warn)
return
try:
# Association Type Name is not a unique value at this time, but should be.
for association in data.get('data', {}).get('associationType', []):
self._indicator_associations_types_data[association.get('name')] = association
except Exception as e:
self.handle_error(200, [e])
|
def is_valid_lval(t):
"""Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal"""
if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:
return True
return False
|
Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal
|
Below is the instruction that describes the task:
### Input:
Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal
### Response:
def is_valid_lval(t):
"""Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal"""
if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:
return True
return False
|
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
|
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
|
Below is the instruction that describes the task:
### Input:
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
### Response:
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
|
def add_to_ptr_size(self, ptr_size):
# type: (int) -> bool
'''
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
# First add to the path table size.
self.path_tbl_size += ptr_size
if (utils.ceiling_div(self.path_tbl_size, 4096) * 2) > self.path_table_num_extents:
# If we overflowed the path table size, then we need to update the
# space size. Since we always add two extents for the little and
# two for the big, add four total extents. The locations will be
# fixed up during reshuffle_extents.
self.path_table_num_extents += 2
return True
return False
|
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
|
Below is the instruction that describes the task:
### Input:
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
### Response:
def add_to_ptr_size(self, ptr_size):
# type: (int) -> bool
'''
Add the space for a path table record to the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being added to this Volume Descriptor.
Returns:
True if extents need to be added to the Volume Descriptor, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
# First add to the path table size.
self.path_tbl_size += ptr_size
if (utils.ceiling_div(self.path_tbl_size, 4096) * 2) > self.path_table_num_extents:
# If we overflowed the path table size, then we need to update the
# space size. Since we always add two extents for the little and
# two for the big, add four total extents. The locations will be
# fixed up during reshuffle_extents.
self.path_table_num_extents += 2
return True
return False
|
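A small standalone sketch of the overflow check used in add_to_ptr_size above: the path table size grows with each record, and extents are added once the little- and big-endian tables no longer fit. The sizes below are arbitrary test values:
def ceiling_div(numerator, denominator):
    return -(-numerator // denominator)

path_tbl_size, num_extents = 4000, 2
for ptr_len in (80, 120):
    path_tbl_size += ptr_len
    if ceiling_div(path_tbl_size, 4096) * 2 > num_extents:
        num_extents += 2   # grow both path tables, as in the method above
        print('grew to', num_extents, 'extents at size', path_tbl_size)
# grew to 4 extents at size 4200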
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
try:
paged_results = paginator.page(page_number)
except InvalidPage as exception:
raise Http404(
"Invalid page {page_number}: {message}".format(
page_number=page_number,
message=str(exception)
)
)
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = list(map(ordered_objects, search_queryset_pks))
paged_results.object_list = object_results
return paged_results
|
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
|
Below is the instruction that describes the task:
### Input:
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
### Response:
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
try:
paged_results = paginator.page(page_number)
except InvalidPage as exception:
raise Http404(
"Invalid page {page_number}: {message}".format(
page_number=page_number,
message=str(exception)
)
)
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = list(map(ordered_objects, search_queryset_pks))
paged_results.object_list = object_results
return paged_results
|
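The core trick in paginate_search_results above is re-ordering the database objects to match the search engine's relevance order. A plain-Python sketch of that reordering, with a dict standing in for the unordered queryset:
search_order = [7, 3, 9]                        # pks in the order the search engine returned them
db_rows = {3: 'row-3', 7: 'row-7', 9: 'row-9'}  # stand-in for the unordered queryset
ordered = [db_rows[pk] for pk in search_order]
print(ordered)   # ['row-7', 'row-3', 'row-9']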
def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:
"""
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
"""
directory = os.path.dirname(fullfilename)
return create_sys_dsn(
access_driver,
SERVER="",
DESCRIPTION=description,
DSN=dsn,
DBQ=fullfilename,
DefaultDir=directory
)
|
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
|
Below is the instruction that describes the task:
### Input:
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
### Response:
def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:
"""
(Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
"""
directory = os.path.dirname(fullfilename)
return create_sys_dsn(
access_driver,
SERVER="",
DESCRIPTION=description,
DSN=dsn,
DBQ=fullfilename,
DefaultDir=directory
)
|
def distinct():
"""
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
"""
def validate(value):
for i, item in enumerate(value):
if item in value[i+1:]:
return e("{} is not a distinct set of values", value)
return validate
|
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
|
Below is the instruction that describes the task:
### Input:
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
### Response:
def distinct():
"""
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
"""
def validate(value):
for i, item in enumerate(value):
if item in value[i+1:]:
return e("{} is not a distinct set of values", value)
return validate
|
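The duplicate scan inside the distinct() validator above, restated as a standalone helper so it can be run without the surrounding validation framework (the e() error formatter is not reproduced here):
def has_duplicates(values):
    # same pairwise scan as distinct(): compare each item to everything after it
    return any(item in values[i + 1:] for i, item in enumerate(values))

print(has_duplicates([1, 2, 3]))   # False
print(has_duplicates([1, 2, 1]))   # True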
def oauth_register(form):
"""Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
"""
if form.validate():
data = form.to_dict()
if not data.get('password'):
data['password'] = ''
user = register_user(**data)
if not data['password']:
user.password = None
_datastore.commit()
return user
|
Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
|
Below is the instruction that describes the task:
### Input:
Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
### Response:
def oauth_register(form):
"""Register user if possible.
:param form: A form instance.
:returns: A :class:`invenio_accounts.models.User` instance.
"""
if form.validate():
data = form.to_dict()
if not data.get('password'):
data['password'] = ''
user = register_user(**data)
if not data['password']:
user.password = None
_datastore.commit()
return user
|
def _make_get_request(self, uri, parameters=None, timeout=None):
"""
Given a request add in the required parameters and return the parsed
XML object.
"""
if not timeout:
timeout = self.timeout
return self._make_request(requests.get, uri, params=parameters, timeout=timeout)
|
Given a request add in the required parameters and return the parsed
XML object.
|
Below is the instruction that describes the task:
### Input:
Given a request add in the required parameters and return the parsed
XML object.
### Response:
def _make_get_request(self, uri, parameters=None, timeout=None):
"""
Given a request add in the required parameters and return the parsed
XML object.
"""
if not timeout:
timeout = self.timeout
return self._make_request(requests.get, uri, params=parameters, timeout=timeout)
|
def delete_snapshot(snapshots_ids=None, config="root"):
'''
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
'''
if not snapshots_ids:
raise CommandExecutionError('Error: No snapshot ID has been provided')
try:
current_snapshots_ids = [x['id'] for x in list_snapshots(config)]
if not isinstance(snapshots_ids, list):
snapshots_ids = [snapshots_ids]
if not set(snapshots_ids).issubset(set(current_snapshots_ids)):
raise CommandExecutionError(
"Error: Snapshots '{0}' not found".format(", ".join(
[six.text_type(x) for x in set(snapshots_ids).difference(
set(current_snapshots_ids))]))
)
snapper.DeleteSnapshots(config, snapshots_ids)
return {config: {"ids": snapshots_ids, "status": "deleted"}}
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
|
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
|
Below is the instruction that describes the task:
### Input:
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
### Response:
def delete_snapshot(snapshots_ids=None, config="root"):
'''
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
'''
if not snapshots_ids:
raise CommandExecutionError('Error: No snapshot ID has been provided')
try:
current_snapshots_ids = [x['id'] for x in list_snapshots(config)]
if not isinstance(snapshots_ids, list):
snapshots_ids = [snapshots_ids]
if not set(snapshots_ids).issubset(set(current_snapshots_ids)):
raise CommandExecutionError(
"Error: Snapshots '{0}' not found".format(", ".join(
[six.text_type(x) for x in set(snapshots_ids).difference(
set(current_snapshots_ids))]))
)
snapper.DeleteSnapshots(config, snapshots_ids)
return {config: {"ids": snapshots_ids, "status": "deleted"}}
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
|
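A plain-Python sketch of the ID validation delete_snapshot performs before calling snapper, using made-up snapshot IDs:
current_ids = [1, 5, 54, 55]          # IDs reported by list_snapshots()
requested = [54, 56]
missing = set(requested).difference(current_ids)
if missing:
    print("Error: Snapshots '{0}' not found".format(", ".join(str(x) for x in sorted(missing))))
# Error: Snapshots '56' not found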
def _check_fpos(self, fp_, fpos, offset, block):
"""Check file position matches blocksize"""
if (fp_.tell() + offset != fpos):
warnings.warn("Actual "+block+" header size does not match expected")
return
|
Check file position matches blocksize
|
Below is the instruction that describes the task:
### Input:
Check file position matches blocksize
### Response:
def _check_fpos(self, fp_, fpos, offset, block):
"""Check file position matches blocksize"""
if (fp_.tell() + offset != fpos):
warnings.warn("Actual "+block+" header size does not match expected")
return
|
def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
"""
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,))
|
Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
|
Below is the instruction that describes the task:
### Input:
Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
### Response:
def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
"""
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,))
|
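A minimal, self-contained sketch of the selectors pattern used above (register, select on EVENT_READ, unregister before handling), using a local socketpair instead of real client connections:
import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()                  # stand-in for server/client sockets
sel.register(b, selectors.EVENT_READ)
a.send(b'ping')                             # make b readable
for key, mask in sel.select(timeout=1.0):
    sel.unregister(key.fileobj)             # unregister, then hand off for handling
    print(key.fileobj.recv(4))              # b'ping'
a.close()
b.close()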
def add_signal(self, signal):
"""Adds "input" signal to connected signals.
Internally connects the signal to a control slot."""
self.__signals.append(signal)
if self.__connected:
# Connects signal if the current state is "connected"
self.__connect_signal(signal)
|
Adds "input" signal to connected signals.
Internally connects the signal to a control slot.
|
Below is the instruction that describes the task:
### Input:
Adds "input" signal to connected signals.
Internally connects the signal to a control slot.
### Response:
def add_signal(self, signal):
"""Adds "input" signal to connected signals.
Internally connects the signal to a control slot."""
self.__signals.append(signal)
if self.__connected:
# Connects signal if the current state is "connected"
self.__connect_signal(signal)
|
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
|
Read a line from provided file, skipping any blank or comment lines
|
Below is the instruction that describes the task:
### Input:
Read a line from provided file, skipping any blank or comment lines
### Response:
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
|
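A quick check of the readline helper above against an in-memory file, showing that comment and blank lines are skipped:
import io

buf = io.StringIO('# header\n\nchr1\t100\n')
print(repr(readline(buf, skip_blank=True)))   # prints 'chr1\t100\n' (comment and blank line skipped)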
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
if which_agents==np.array([True]):
RepAgentMarkovConsumerType.simBirth(self,which_agents)
if self.t_sim == 0: # Initialize perception distribution for Markov state
self.pLvlTrue = np.ones(self.AgentCount)
self.aLvlNow = self.aNrmNow*self.pLvlTrue
StateCount = self.MrkvArray.shape[0]
self.pLvlNow = np.ones(StateCount) # Perceived productivity level by Markov state
self.MrkvPcvd = np.zeros(StateCount) # Distribution of perceived Markov state
self.MrkvPcvd[self.MrkvNow[0]] = 1.0
|
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
|
Below is the instruction that describes the task:
### Input:
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
### Response:
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlTrue = 1.0 in the very first simulated period, as well as initializing the perception
of aggregate productivity for each Markov state. The representative agent begins with
the correct perception of the Markov state.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
if which_agents==np.array([True]):
RepAgentMarkovConsumerType.simBirth(self,which_agents)
if self.t_sim == 0: # Initialize perception distribution for Markov state
self.pLvlTrue = np.ones(self.AgentCount)
self.aLvlNow = self.aNrmNow*self.pLvlTrue
StateCount = self.MrkvArray.shape[0]
self.pLvlNow = np.ones(StateCount) # Perceived productivity level by Markov state
self.MrkvPcvd = np.zeros(StateCount) # Distribution of perceived Markov state
self.MrkvPcvd[self.MrkvNow[0]] = 1.0
|
def _config(name, key=None, **kwargs):
'''
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
'''
if key is None:
key = name
if name in kwargs:
value = kwargs[name]
else:
value = __salt__['config.option']('ldap.{0}'.format(key))
return salt.utils.data.decode(value, to_str=True)
|
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
|
Below is the instruction that describes the task:
### Input:
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
### Response:
def _config(name, key=None, **kwargs):
'''
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
'''
if key is None:
key = name
if name in kwargs:
value = kwargs[name]
else:
value = __salt__['config.option']('ldap.{0}'.format(key))
return salt.utils.data.decode(value, to_str=True)
|
def settzinfo(self, tzinfo, start=2000, end=2030):
"""
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo classes dst method always treats times that could be in either
offset as being in the later regime
"""
def fromLastWeek(dt):
"""
How many weeks from the end of the month dt is, starting from 1.
"""
weekDelta = datetime.timedelta(weeks=1)
n = 1
current = dt + weekDelta
while current.month == dt.month:
n += 1
current += weekDelta
return n
# lists of dictionaries defining rules which are no longer in effect
completed = {'daylight' : [], 'standard' : []}
# dictionary defining rules which are currently in effect
working = {'daylight' : None, 'standard' : None}
# rule may be based on nth week of the month or the nth from the last
for year in range(start, end + 1):
newyear = datetime.datetime(year, 1, 1)
for transitionTo in 'daylight', 'standard':
transition = getTransition(transitionTo, year, tzinfo)
oldrule = working[transitionTo]
if transition == newyear:
# transitionTo is in effect for the whole year
rule = {'end' : None,
'start' : newyear,
'month' : 1,
'weekday' : None,
'hour' : None,
'plus' : None,
'minus' : None,
'name' : tzinfo.tzname(newyear),
'offset' : tzinfo.utcoffset(newyear),
'offsetfrom' : tzinfo.utcoffset(newyear)}
if oldrule is None:
# transitionTo was not yet in effect
working[transitionTo] = rule
else:
# transitionTo was already in effect
if (oldrule['offset'] !=
tzinfo.utcoffset(newyear)):
# old rule was different, it shouldn't continue
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
elif transition is None:
# transitionTo is not in effect
if oldrule is not None:
# transitionTo used to be in effect
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = None
else:
# an offset transition was found
try:
old_offset = tzinfo.utcoffset(transition - twoHours)
name = tzinfo.tzname(transition)
offset = tzinfo.utcoffset(transition)
except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError):
# guaranteed that tzinfo is a pytz timezone
is_dst = (transitionTo == "daylight")
old_offset = tzinfo.utcoffset(transition - twoHours, is_dst=is_dst)
name = tzinfo.tzname(transition, is_dst=is_dst)
offset = tzinfo.utcoffset(transition, is_dst=is_dst)
rule = {'end' : None, # None, or an integer year
'start' : transition, # the datetime of transition
'month' : transition.month,
'weekday' : transition.weekday(),
'hour' : transition.hour,
'name' : name,
'plus' : int(
(transition.day - 1)/ 7 + 1), # nth week of the month
'minus' : fromLastWeek(transition), # nth from last week
'offset' : offset,
'offsetfrom' : old_offset}
if oldrule is None:
working[transitionTo] = rule
else:
plusMatch = rule['plus'] == oldrule['plus']
minusMatch = rule['minus'] == oldrule['minus']
truth = plusMatch or minusMatch
for key in 'month', 'weekday', 'hour', 'offset':
truth = truth and rule[key] == oldrule[key]
if truth:
# the old rule is still true, limit to plus or minus
if not plusMatch:
oldrule['plus'] = None
if not minusMatch:
oldrule['minus'] = None
else:
# the new rule did not match the old
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
for transitionTo in 'daylight', 'standard':
if working[transitionTo] is not None:
completed[transitionTo].append(working[transitionTo])
self.tzid = []
self.daylight = []
self.standard = []
self.add('tzid').value = self.pickTzid(tzinfo, True)
# old = None # unused?
for transitionTo in 'daylight', 'standard':
for rule in completed[transitionTo]:
comp = self.add(transitionTo)
dtstart = comp.add('dtstart')
dtstart.value = rule['start']
if rule['name'] is not None:
comp.add('tzname').value = rule['name']
line = comp.add('tzoffsetto')
line.value = deltaToOffset(rule['offset'])
line = comp.add('tzoffsetfrom')
line.value = deltaToOffset(rule['offsetfrom'])
if rule['plus'] is not None:
num = rule['plus']
elif rule['minus'] is not None:
num = -1 * rule['minus']
else:
num = None
if num is not None:
dayString = ";BYDAY=" + str(num) + WEEKDAYS[rule['weekday']]
else:
dayString = ""
if rule['end'] is not None:
if rule['hour'] is None:
# all year offset, with no rule
endDate = datetime.datetime(rule['end'], 1, 1)
else:
weekday = rrule.weekday(rule['weekday'], num)
du_rule = rrule.rrule(rrule.YEARLY,
bymonth = rule['month'],byweekday = weekday,
dtstart = datetime.datetime(
rule['end'], 1, 1, rule['hour'])
)
endDate = du_rule[0]
endDate = endDate.replace(tzinfo = utc) - rule['offsetfrom']
endString = ";UNTIL="+ dateTimeToString(endDate)
else:
endString = ''
new_rule = "FREQ=YEARLY{0!s};BYMONTH={1!s}{2!s}"\
.format(dayString, rule['month'], endString)
comp.add('rrule').value = new_rule
|
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo classes dst method always treats times that could be in either
offset as being in the later regime
|
Below is the instruction that describes the task:
### Input:
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo classes dst method always treats times that could be in either
offset as being in the later regime
### Response:
def settzinfo(self, tzinfo, start=2000, end=2030):
"""
Create appropriate objects in self to represent tzinfo.
Collapse DST transitions to rrules as much as possible.
Assumptions:
- DST <-> Standard transitions occur on the hour
- never within a month of one another
- twice or fewer times a year
- never in the month of December
- DST always moves offset exactly one hour later
- tzinfo classes dst method always treats times that could be in either
offset as being in the later regime
"""
def fromLastWeek(dt):
"""
How many weeks from the end of the month dt is, starting from 1.
"""
weekDelta = datetime.timedelta(weeks=1)
n = 1
current = dt + weekDelta
while current.month == dt.month:
n += 1
current += weekDelta
return n
# lists of dictionaries defining rules which are no longer in effect
completed = {'daylight' : [], 'standard' : []}
# dictionary defining rules which are currently in effect
working = {'daylight' : None, 'standard' : None}
# rule may be based on nth week of the month or the nth from the last
for year in range(start, end + 1):
newyear = datetime.datetime(year, 1, 1)
for transitionTo in 'daylight', 'standard':
transition = getTransition(transitionTo, year, tzinfo)
oldrule = working[transitionTo]
if transition == newyear:
# transitionTo is in effect for the whole year
rule = {'end' : None,
'start' : newyear,
'month' : 1,
'weekday' : None,
'hour' : None,
'plus' : None,
'minus' : None,
'name' : tzinfo.tzname(newyear),
'offset' : tzinfo.utcoffset(newyear),
'offsetfrom' : tzinfo.utcoffset(newyear)}
if oldrule is None:
# transitionTo was not yet in effect
working[transitionTo] = rule
else:
# transitionTo was already in effect
if (oldrule['offset'] !=
tzinfo.utcoffset(newyear)):
# old rule was different, it shouldn't continue
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
elif transition is None:
# transitionTo is not in effect
if oldrule is not None:
# transitionTo used to be in effect
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = None
else:
# an offset transition was found
try:
old_offset = tzinfo.utcoffset(transition - twoHours)
name = tzinfo.tzname(transition)
offset = tzinfo.utcoffset(transition)
except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError):
# guaranteed that tzinfo is a pytz timezone
is_dst = (transitionTo == "daylight")
old_offset = tzinfo.utcoffset(transition - twoHours, is_dst=is_dst)
name = tzinfo.tzname(transition, is_dst=is_dst)
offset = tzinfo.utcoffset(transition, is_dst=is_dst)
rule = {'end' : None, # None, or an integer year
'start' : transition, # the datetime of transition
'month' : transition.month,
'weekday' : transition.weekday(),
'hour' : transition.hour,
'name' : name,
'plus' : int(
(transition.day - 1)/ 7 + 1), # nth week of the month
'minus' : fromLastWeek(transition), # nth from last week
'offset' : offset,
'offsetfrom' : old_offset}
if oldrule is None:
working[transitionTo] = rule
else:
plusMatch = rule['plus'] == oldrule['plus']
minusMatch = rule['minus'] == oldrule['minus']
truth = plusMatch or minusMatch
for key in 'month', 'weekday', 'hour', 'offset':
truth = truth and rule[key] == oldrule[key]
if truth:
# the old rule is still true, limit to plus or minus
if not plusMatch:
oldrule['plus'] = None
if not minusMatch:
oldrule['minus'] = None
else:
# the new rule did not match the old
oldrule['end'] = year - 1
completed[transitionTo].append(oldrule)
working[transitionTo] = rule
for transitionTo in 'daylight', 'standard':
if working[transitionTo] is not None:
completed[transitionTo].append(working[transitionTo])
self.tzid = []
self.daylight = []
self.standard = []
self.add('tzid').value = self.pickTzid(tzinfo, True)
# old = None # unused?
for transitionTo in 'daylight', 'standard':
for rule in completed[transitionTo]:
comp = self.add(transitionTo)
dtstart = comp.add('dtstart')
dtstart.value = rule['start']
if rule['name'] is not None:
comp.add('tzname').value = rule['name']
line = comp.add('tzoffsetto')
line.value = deltaToOffset(rule['offset'])
line = comp.add('tzoffsetfrom')
line.value = deltaToOffset(rule['offsetfrom'])
if rule['plus'] is not None:
num = rule['plus']
elif rule['minus'] is not None:
num = -1 * rule['minus']
else:
num = None
if num is not None:
dayString = ";BYDAY=" + str(num) + WEEKDAYS[rule['weekday']]
else:
dayString = ""
if rule['end'] is not None:
if rule['hour'] is None:
# all year offset, with no rule
endDate = datetime.datetime(rule['end'], 1, 1)
else:
weekday = rrule.weekday(rule['weekday'], num)
du_rule = rrule.rrule(rrule.YEARLY,
bymonth = rule['month'],byweekday = weekday,
dtstart = datetime.datetime(
rule['end'], 1, 1, rule['hour'])
)
endDate = du_rule[0]
endDate = endDate.replace(tzinfo = utc) - rule['offsetfrom']
endString = ";UNTIL="+ dateTimeToString(endDate)
else:
endString = ''
new_rule = "FREQ=YEARLY{0!s};BYMONTH={1!s}{2!s}"\
.format(dayString, rule['month'], endString)
comp.add('rrule').value = new_rule
|
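The nested fromLastWeek helper inside settzinfo above is easy to sanity-check on its own; a standalone sketch with assumed dates:
import datetime

def from_last_week(dt):
    # which week counting back from the end of dt's month, starting at 1
    week = datetime.timedelta(weeks=1)
    n, current = 1, dt + week
    while current.month == dt.month:
        n += 1
        current += week
    return n

print(from_last_week(datetime.datetime(2023, 10, 29)))   # 1 (last week of October)
print(from_last_week(datetime.datetime(2023, 10, 1)))    # 5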
def addMember(self, imagePtr=None):
"""
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
"""
numchips=imagePtr._numchips
log.info("Computing static mask:\n")
chips = imagePtr.group
if chips is None:
chips = imagePtr.getExtensions()
#for chip in range(1,numchips+1,1):
for chip in chips:
chipid=imagePtr.scienceExt + ','+ str(chip)
chipimage=imagePtr.getData(chipid)
signature=imagePtr[chipid].signature
# If this is a new signature, create a new Static Mask file which is empty
# only create a new mask if one doesn't already exist
if ((signature not in self.masklist) or (len(self.masklist) == 0)):
self.masklist[signature] = self._buildMaskArray(signature)
maskname = constructFilename(signature)
self.masknames[signature] = maskname
else:
chip_sig = buildSignatureKey(signature)
for s in self.masknames:
if chip_sig in self.masknames[s]:
maskname = self.masknames[s]
break
imagePtr[chipid].outputNames['staticMask'] = maskname
stats = ImageStats(chipimage,nclip=3,fields='mode')
mode = stats.mode
rms = stats.stddev
nbins = len(stats.histogram)
del stats
log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' %
(mode, rms, self.static_sig))
if nbins >= 2: # only combine data from new image if enough data to mask
sky_rms_diff = mode - (self.static_sig*rms)
np.bitwise_and(self.masklist[signature],
np.logical_not(np.less(chipimage, sky_rms_diff)),
self.masklist[signature])
del chipimage
|
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
|
Below is the instruction that describes the task:
### Input:
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
### Response:
def addMember(self, imagePtr=None):
"""
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
"""
numchips=imagePtr._numchips
log.info("Computing static mask:\n")
chips = imagePtr.group
if chips is None:
chips = imagePtr.getExtensions()
#for chip in range(1,numchips+1,1):
for chip in chips:
chipid=imagePtr.scienceExt + ','+ str(chip)
chipimage=imagePtr.getData(chipid)
signature=imagePtr[chipid].signature
# If this is a new signature, create a new Static Mask file which is empty
# only create a new mask if one doesn't already exist
if ((signature not in self.masklist) or (len(self.masklist) == 0)):
self.masklist[signature] = self._buildMaskArray(signature)
maskname = constructFilename(signature)
self.masknames[signature] = maskname
else:
chip_sig = buildSignatureKey(signature)
for s in self.masknames:
if chip_sig in self.masknames[s]:
maskname = self.masknames[s]
break
imagePtr[chipid].outputNames['staticMask'] = maskname
stats = ImageStats(chipimage,nclip=3,fields='mode')
mode = stats.mode
rms = stats.stddev
nbins = len(stats.histogram)
del stats
log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' %
(mode, rms, self.static_sig))
if nbins >= 2: # only combine data from new image if enough data to mask
sky_rms_diff = mode - (self.static_sig*rms)
np.bitwise_and(self.masklist[signature],
np.logical_not(np.less(chipimage, sky_rms_diff)),
self.masklist[signature])
del chipimage
|
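A tiny numpy illustration of the masking step in addMember above: pixels more than static_sig sigma below the sky mode are zeroed in the static mask. The array and statistics are invented values:
import numpy as np

chip = np.array([[10., 10., 3.], [10., 2., 10.]])
mode, rms, static_sig = 10.0, 1.0, 4.0
sky_rms_diff = mode - static_sig * rms                 # 6.0
mask = np.ones(chip.shape, dtype=np.int16)             # 1 = usable pixel
np.bitwise_and(mask, np.logical_not(np.less(chip, sky_rms_diff)), mask)
print(mask)   # [[1 1 0]
              #  [1 0 1]]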
def get_authors(self, language):
""" Return the list of this task's authors """
return self.gettext(language, self._author) if self._author else ""
|
Return the list of this task's authors
|
Below is the instruction that describes the task:
### Input:
Return the list of this task's authors
### Response:
def get_authors(self, language):
""" Return the list of this task's authors """
return self.gettext(language, self._author) if self._author else ""
|
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Stop CPC (requires DPM mode)."""
assert wait_for_completion is True # async not supported yet
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
cpc.properties['status'] = 'not-operating'
|
Operation: Stop CPC (requires DPM mode).
|
Below is the instruction that describes the task:
### Input:
Operation: Stop CPC (requires DPM mode).
### Response:
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Stop CPC (requires DPM mode)."""
assert wait_for_completion is True # async not supported yet
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
cpc.properties['status'] = 'not-operating'
|
def decode_text(s):
"""Decodes a PDFDocEncoding string to Unicode."""
if s.startswith(b'\xfe\xff'):
return unicode(s[2:], 'utf-16be', 'ignore')
else:
return ''.join(PDFDocEncoding[ord(c)] for c in s)
|
Decodes a PDFDocEncoding string to Unicode.
|
Below is the instruction that describes the task:
### Input:
Decodes a PDFDocEncoding string to Unicode.
### Response:
def decode_text(s):
"""Decodes a PDFDocEncoding string to Unicode."""
if s.startswith(b'\xfe\xff'):
return unicode(s[2:], 'utf-16be', 'ignore')
else:
return ''.join(PDFDocEncoding[ord(c)] for c in s)
|
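The decode_text function above is Python 2 code (it returns unicode and indexes bytes as characters); a hedged Python 3 sketch of the same branch logic, with a chr() lookup standing in for the PDFDocEncoding table:
raw = b'\xfe\xff\x00H\x00i'                  # UTF-16BE with the byte-order mark
if raw.startswith(b'\xfe\xff'):
    text = raw[2:].decode('utf-16be', 'ignore')
else:
    text = ''.join(chr(b) for b in raw)      # stand-in for PDFDocEncoding[ord(c)]
print(text)   # Hi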
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
"""
Check if there are any requests which are not finalised, i.e for
which there are not enough PROPAGATEs
"""
return {key for key in reqKeys if not self.requests.is_finalised(key)}
|
Check if there are any requests which are not finalised, i.e. for
which there are not enough PROPAGATEs
|
Below is the instruction that describes the task:
### Input:
Check if there are any requests which are not finalised, i.e. for
which there are not enough PROPAGATEs
### Response:
def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
"""
Check if there are any requests which are not finalised, i.e for
which there are not enough PROPAGATEs
"""
return {key for key in reqKeys if not self.requests.is_finalised(key)}
|
def _get_deployment_instance_summary(awsclient, deployment_id, instance_id):
"""instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
"""
client_codedeploy = awsclient.get_client('codedeploy')
request = {
'deploymentId': deployment_id,
'instanceId': instance_id
}
response = client_codedeploy.get_deployment_instance(**request)
return response['instanceSummary']['status'], \
response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']
|
instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
|
Below is the the instruction that describes the task:
### Input:
instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
### Response:
def _get_deployment_instance_summary(awsclient, deployment_id, instance_id):
"""instance summary.
:param awsclient:
:param deployment_id:
:param instance_id:
return: status, last_event
"""
client_codedeploy = awsclient.get_client('codedeploy')
request = {
'deploymentId': deployment_id,
'instanceId': instance_id
}
response = client_codedeploy.get_deployment_instance(**request)
return response['instanceSummary']['status'], \
response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']
|
def get_possible_initializer_keys(cls, num_layers):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
keys = [cls.WT, cls.WH]
for layer_index in xrange(num_layers):
layer_str = str(layer_index)
keys += [
cls.WT + layer_str,
cls.BT + layer_str,
cls.WH + layer_str,
cls.BH + layer_str]
return set(keys)
|
Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
|
Below is the the instruction that describes the task:
### Input:
Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
### Response:
def get_possible_initializer_keys(cls, num_layers):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
keys = [cls.WT, cls.WH]
for layer_index in xrange(num_layers):
layer_str = str(layer_index)
keys += [
cls.WT + layer_str,
cls.BT + layer_str,
cls.WH + layer_str,
cls.BH + layer_str]
return set(keys)
|
def get_article_placeholders(self, article):
"""
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
"""
placeholders_search_list = getattr(settings, 'CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST', {})
included = placeholders_search_list.get('include', [])
excluded = placeholders_search_list.get('exclude', [])
diff = set(included) - set(excluded)
if diff:
return article.placeholders.filter(slot__in=diff)
elif excluded:
return article.placeholders.exclude(slot__in=excluded)
else:
return article.placeholders.all()
|
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
|
Below is the the instruction that describes the task:
### Input:
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
### Response:
def get_article_placeholders(self, article):
"""
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
"""
placeholders_search_list = getattr(settings, 'CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST', {})
included = placeholders_search_list.get('include', [])
excluded = placeholders_search_list.get('exclude', [])
diff = set(included) - set(excluded)
if diff:
return article.placeholders.filter(slot__in=diff)
elif excluded:
return article.placeholders.exclude(slot__in=excluded)
else:
return article.placeholders.all()
|
def bandpass_filter(rate=None, low=None, high=None, order=None):
"""Butterworth bandpass filter."""
assert low < high
assert order >= 1
return signal.butter(order,
(low / (rate / 2.), high / (rate / 2.)),
'pass')
|
Butterworth bandpass filter.
|
Below is the the instruction that describes the task:
### Input:
Butterworth bandpass filter.
### Response:
def bandpass_filter(rate=None, low=None, high=None, order=None):
"""Butterworth bandpass filter."""
assert low < high
assert order >= 1
return signal.butter(order,
(low / (rate / 2.), high / (rate / 2.)),
'pass')
|
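Editor's note: a short, self-contained sketch of how the bandpass_filter record above is typically used with SciPy: design the Butterworth coefficients, then apply them with lfilter. The sample rate, band edges and test tone are arbitrary illustration values, and 'bandpass' is used as the canonical spelling of the 'pass' shorthand that appears in the record.

import numpy as np
from scipy import signal

rate = 1000.0                                                 # assumed sample rate in Hz
b, a = signal.butter(4, (5 / (rate / 2.), 50 / (rate / 2.)), 'bandpass')

t = np.arange(0, 1, 1 / rate)
x = np.sin(2 * np.pi * 20 * t) + np.sin(2 * np.pi * 300 * t)  # 20 Hz kept, 300 Hz rejected
y = signal.lfilter(b, a, x)                                   # filtered signal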
def include(self, target):
"""
Determine if a given value is included in the
array or object using `is`.
"""
if self._clean.isDict():
return self._wrap(target in self.obj.values())
else:
return self._wrap(target in self.obj)
|
Determine if a given value is included in the
array or object using `is`.
|
Below is the the instruction that describes the task:
### Input:
Determine if a given value is included in the
array or object using `is`.
### Response:
def include(self, target):
"""
Determine if a given value is included in the
array or object using `is`.
"""
if self._clean.isDict():
return self._wrap(target in self.obj.values())
else:
return self._wrap(target in self.obj)
|
def result(i):
""" Returns which 8-bit registers are used by an asm
instruction to return a result.
"""
ins = inst(i)
op = oper(i)
if ins in ('or', 'and') and op == ['a']:
return ['f']
if ins in {'xor', 'or', 'and', 'neg', 'cpl', 'daa', 'rld', 'rrd', 'rra', 'rla', 'rrca', 'rlca'}:
return ['a', 'f']
if ins in {'bit', 'cp', 'scf', 'ccf'}:
return ['f']
if ins in {'sub', 'add', 'sbc', 'adc'}:
if len(op) == 1:
return ['a', 'f']
else:
return single_registers(op[0]) + ['f']
if ins == 'djnz':
return ['b', 'f']
if ins in {'ldir', 'ldi', 'lddr', 'ldd'}:
return ['f', 'b', 'c', 'd', 'e', 'h', 'l']
if ins in {'cpi', 'cpir', 'cpd', 'cpdr'}:
return ['f', 'b', 'c', 'h', 'l']
if ins in ('pop', 'ld'):
return single_registers(op[0])
if ins in {'inc', 'dec', 'sbc', 'rr', 'rl', 'rrc', 'rlc'}:
return ['f'] + single_registers(op[0])
if ins in ('set', 'res'):
return single_registers(op[1])
return []
|
Returns which 8-bit registers are used by an asm
instruction to return a result.
|
Below is the the instruction that describes the task:
### Input:
Returns which 8-bit registers are used by an asm
instruction to return a result.
### Response:
def result(i):
""" Returns which 8-bit registers are used by an asm
instruction to return a result.
"""
ins = inst(i)
op = oper(i)
if ins in ('or', 'and') and op == ['a']:
return ['f']
if ins in {'xor', 'or', 'and', 'neg', 'cpl', 'daa', 'rld', 'rrd', 'rra', 'rla', 'rrca', 'rlca'}:
return ['a', 'f']
if ins in {'bit', 'cp', 'scf', 'ccf'}:
return ['f']
if ins in {'sub', 'add', 'sbc', 'adc'}:
if len(op) == 1:
return ['a', 'f']
else:
return single_registers(op[0]) + ['f']
if ins == 'djnz':
return ['b', 'f']
if ins in {'ldir', 'ldi', 'lddr', 'ldd'}:
return ['f', 'b', 'c', 'd', 'e', 'h', 'l']
if ins in {'cpi', 'cpir', 'cpd', 'cpdr'}:
return ['f', 'b', 'c', 'h', 'l']
if ins in ('pop', 'ld'):
return single_registers(op[0])
if ins in {'inc', 'dec', 'sbc', 'rr', 'rl', 'rrc', 'rlc'}:
return ['f'] + single_registers(op[0])
if ins in ('set', 'res'):
return single_registers(op[1])
return []
|
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
|
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
|
Below is the the instruction that describes the task:
### Input:
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
### Response:
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
|
def protect_libraries_from_patching():
"""
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
"""
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
# import for side effects
import _pydev_imps._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name]
|
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
|
Below is the the instruction that describes the task:
### Input:
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
### Response:
def protect_libraries_from_patching():
"""
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
"""
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
# import for side effects
import _pydev_imps._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name]
|
def epilog(self):
"""Return text formatted for the usage description's epilog."""
bold = '\033[1m'
end = '\033[0m'
available = self.available.copy()
index = available.index(Config.DOWNLOADER_DEFAULT)
available[index] = bold + '(' + available[index] + ')' + end
formatted = ' | '.join(available)
return 'Downloaders available: ' + formatted
|
Return text formatted for the usage description's epilog.
|
Below is the the instruction that describes the task:
### Input:
Return text formatted for the usage description's epilog.
### Response:
def epilog(self):
"""Return text formatted for the usage description's epilog."""
bold = '\033[1m'
end = '\033[0m'
available = self.available.copy()
index = available.index(Config.DOWNLOADER_DEFAULT)
available[index] = bold + '(' + available[index] + ')' + end
formatted = ' | '.join(available)
return 'Downloaders available: ' + formatted
|
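Editor's note: a standalone sketch of the ANSI-escape trick the epilog record above relies on, wrapping the default entry in bold codes before joining the list. The downloader names and the default stand in for Config.DOWNLOADER_DEFAULT and are made up for the example.

bold, end = '\033[1m', '\033[0m'
available = ['aria2c', 'axel', 'wget']      # hypothetical downloader list
default = 'axel'                            # stands in for Config.DOWNLOADER_DEFAULT
index = available.index(default)
available[index] = bold + '(' + available[index] + ')' + end
print('Downloaders available: ' + ' | '.join(available))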
def _ExtractWithFilter(
self, source_path_specs, destination_path, output_writer,
artifact_filters, filter_file, artifact_definitions_path,
custom_artifacts_path, skip_duplicates=True):
"""Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
extraction_engine = engine.BaseEngine()
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
for source_path_spec in source_path_specs:
file_system, mount_point = self._GetSourceFileSystem(
source_path_spec, resolver_context=self._resolver_context)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
source_path_spec)
output_writer.Write(
'Extracting file entries from: {0:s}\n'.format(display_name))
filter_find_specs = extraction_engine.BuildFilterFindSpecs(
artifact_definitions_path, custom_artifacts_path,
extraction_engine.knowledge_base, artifact_filters, filter_file)
searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
for path_spec in searcher.Find(find_specs=filter_find_specs):
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_system.Close()
|
Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
|
Below is the the instruction that describes the task:
### Input:
Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
### Response:
def _ExtractWithFilter(
self, source_path_specs, destination_path, output_writer,
artifact_filters, filter_file, artifact_definitions_path,
custom_artifacts_path, skip_duplicates=True):
"""Extracts files using a filter expression.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
artifact_filters (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
filter_file (str): path of the file that contains the filter file path
filters.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
extraction_engine = engine.BaseEngine()
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
for source_path_spec in source_path_specs:
file_system, mount_point = self._GetSourceFileSystem(
source_path_spec, resolver_context=self._resolver_context)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
source_path_spec)
output_writer.Write(
'Extracting file entries from: {0:s}\n'.format(display_name))
filter_find_specs = extraction_engine.BuildFilterFindSpecs(
artifact_definitions_path, custom_artifacts_path,
extraction_engine.knowledge_base, artifact_filters, filter_file)
searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
for path_spec in searcher.Find(find_specs=filter_find_specs):
self._ExtractFileEntry(
path_spec, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_system.Close()
|
def has_source_contents(self, src_id):
"""Checks if some sources exist."""
return bool(rustcall(_lib.lsm_view_has_source_contents,
self._get_ptr(), src_id))
|
Checks if some sources exist.
|
Below is the the instruction that describes the task:
### Input:
Checks if some sources exist.
### Response:
def has_source_contents(self, src_id):
"""Checks if some sources exist."""
return bool(rustcall(_lib.lsm_view_has_source_contents,
self._get_ptr(), src_id))
|
def looking_for(self):
"""Copy looking for attributes from the source profile to the
destination profile.
"""
looking_for = self.source_profile.looking_for
return self.dest_user.profile.looking_for.update(
gentation=looking_for.gentation,
single=looking_for.single,
near_me=looking_for.near_me,
kinds=looking_for.kinds,
ages=looking_for.ages
)
|
Copy looking for attributes from the source profile to the
destination profile.
|
Below is the the instruction that describes the task:
### Input:
Copy looking for attributes from the source profile to the
destination profile.
### Response:
def looking_for(self):
"""Copy looking for attributes from the source profile to the
destination profile.
"""
looking_for = self.source_profile.looking_for
return self.dest_user.profile.looking_for.update(
gentation=looking_for.gentation,
single=looking_for.single,
near_me=looking_for.near_me,
kinds=looking_for.kinds,
ages=looking_for.ages
)
|
def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
|
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
|
Below is the the instruction that describes the task:
### Input:
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
### Response:
def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
|
def padStr(s, field=None):
""" Pad the begining of a string with spaces, if necessary.
"""
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s
|
Pad the begining of a string with spaces, if necessary.
|
Below is the the instruction that describes the task:
### Input:
Pad the begining of a string with spaces, if necessary.
### Response:
def padStr(s, field=None):
""" Pad the begining of a string with spaces, if necessary.
"""
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s
|
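Editor's note: a tiny demonstration of padStr's behaviour, assuming the padStr function from the record above is in scope; the field width is arbitrary, and str.rjust is shown only as the built-in equivalent.

print(repr(padStr('42', 6)))    # '    42'
print(repr(padStr('42')))       # '42'  (field=None leaves the string alone)
print(repr('42'.rjust(6)))      # built-in equivalent for comparison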
def __doQuery(self, query, format, convert):
"""
Inner method that does the actual query
"""
self.__getFormat(format)
self.sparql.setQuery(query)
if convert:
results = self.sparql.query().convert()
else:
results = self.sparql.query()
return results
|
Inner method that does the actual query
|
Below is the the instruction that describes the task:
### Input:
Inner method that does the actual query
### Response:
def __doQuery(self, query, format, convert):
"""
Inner method that does the actual query
"""
self.__getFormat(format)
self.sparql.setQuery(query)
if convert:
results = self.sparql.query().convert()
else:
results = self.sparql.query()
return results
|
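Editor's note: the __doQuery record above is a thin wrapper around SPARQLWrapper, so the sketch below shows the equivalent direct calls. The endpoint URL and query are placeholders, and network access is required to actually run it.

from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper('https://dbpedia.org/sparql')     # example endpoint
sparql.setReturnFormat(JSON)                             # what __getFormat would arrange
sparql.setQuery('SELECT ?s WHERE { ?s a ?o } LIMIT 5')
results = sparql.query().convert()                       # the convert=True branch
for row in results['results']['bindings']:
    print(row['s']['value'])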
async def _receive_sack_chunk(self, chunk):
"""
Handle a SACK chunk.
"""
if uint32_gt(self._last_sacked_tsn, chunk.cumulative_tsn):
return
received_time = time.time()
self._last_sacked_tsn = chunk.cumulative_tsn
cwnd_fully_utilized = (self._flight_size >= self._cwnd)
done = 0
done_bytes = 0
# handle acknowledged data
while self._sent_queue and uint32_gte(self._last_sacked_tsn, self._sent_queue[0].tsn):
schunk = self._sent_queue.popleft()
done += 1
if not schunk._acked:
done_bytes += schunk._book_size
self._flight_size_decrease(schunk)
# update RTO estimate
if done == 1 and schunk._sent_count == 1:
self._update_rto(received_time - schunk._sent_time)
# handle gap blocks
loss = False
if chunk.gaps:
seen = set()
for gap in chunk.gaps:
for pos in range(gap[0], gap[1] + 1):
highest_seen_tsn = (chunk.cumulative_tsn + pos) % SCTP_TSN_MODULO
seen.add(highest_seen_tsn)
# determined Highest TSN Newly Acked (HTNA)
highest_newly_acked = chunk.cumulative_tsn
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_seen_tsn):
break
if schunk.tsn in seen and not schunk._acked:
done_bytes += schunk._book_size
schunk._acked = True
self._flight_size_decrease(schunk)
highest_newly_acked = schunk.tsn
# strike missing chunks prior to HTNA
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_newly_acked):
break
if schunk.tsn not in seen:
schunk._misses += 1
if schunk._misses == 3:
schunk._misses = 0
if not self._maybe_abandon(schunk):
schunk._retransmit = True
schunk._acked = False
self._flight_size_decrease(schunk)
loss = True
# adjust congestion window
if self._fast_recovery_exit is None:
if done and cwnd_fully_utilized:
if self._cwnd <= self._ssthresh:
# slow start
self._cwnd += min(done_bytes, USERDATA_MAX_LENGTH)
else:
# congestion avoidance
self._partial_bytes_acked += done_bytes
if self._partial_bytes_acked >= self._cwnd:
self._partial_bytes_acked -= self._cwnd
self._cwnd += USERDATA_MAX_LENGTH
if loss:
self._ssthresh = max(self._cwnd // 2, 4 * USERDATA_MAX_LENGTH)
self._cwnd = self._ssthresh
self._partial_bytes_acked = 0
self._fast_recovery_exit = self._sent_queue[-1].tsn
self._fast_recovery_transmit = True
elif uint32_gte(chunk.cumulative_tsn, self._fast_recovery_exit):
self._fast_recovery_exit = None
if not self._sent_queue:
# there is no outstanding data, stop T3
self._t3_cancel()
elif done:
# the earliest outstanding chunk was acknowledged, restart T3
self._t3_restart()
self._update_advanced_peer_ack_point()
await self._data_channel_flush()
await self._transmit()
|
Handle a SACK chunk.
|
Below is the the instruction that describes the task:
### Input:
Handle a SACK chunk.
### Response:
async def _receive_sack_chunk(self, chunk):
"""
Handle a SACK chunk.
"""
if uint32_gt(self._last_sacked_tsn, chunk.cumulative_tsn):
return
received_time = time.time()
self._last_sacked_tsn = chunk.cumulative_tsn
cwnd_fully_utilized = (self._flight_size >= self._cwnd)
done = 0
done_bytes = 0
# handle acknowledged data
while self._sent_queue and uint32_gte(self._last_sacked_tsn, self._sent_queue[0].tsn):
schunk = self._sent_queue.popleft()
done += 1
if not schunk._acked:
done_bytes += schunk._book_size
self._flight_size_decrease(schunk)
# update RTO estimate
if done == 1 and schunk._sent_count == 1:
self._update_rto(received_time - schunk._sent_time)
# handle gap blocks
loss = False
if chunk.gaps:
seen = set()
for gap in chunk.gaps:
for pos in range(gap[0], gap[1] + 1):
highest_seen_tsn = (chunk.cumulative_tsn + pos) % SCTP_TSN_MODULO
seen.add(highest_seen_tsn)
# determined Highest TSN Newly Acked (HTNA)
highest_newly_acked = chunk.cumulative_tsn
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_seen_tsn):
break
if schunk.tsn in seen and not schunk._acked:
done_bytes += schunk._book_size
schunk._acked = True
self._flight_size_decrease(schunk)
highest_newly_acked = schunk.tsn
# strike missing chunks prior to HTNA
for schunk in self._sent_queue:
if uint32_gt(schunk.tsn, highest_newly_acked):
break
if schunk.tsn not in seen:
schunk._misses += 1
if schunk._misses == 3:
schunk._misses = 0
if not self._maybe_abandon(schunk):
schunk._retransmit = True
schunk._acked = False
self._flight_size_decrease(schunk)
loss = True
# adjust congestion window
if self._fast_recovery_exit is None:
if done and cwnd_fully_utilized:
if self._cwnd <= self._ssthresh:
# slow start
self._cwnd += min(done_bytes, USERDATA_MAX_LENGTH)
else:
# congestion avoidance
self._partial_bytes_acked += done_bytes
if self._partial_bytes_acked >= self._cwnd:
self._partial_bytes_acked -= self._cwnd
self._cwnd += USERDATA_MAX_LENGTH
if loss:
self._ssthresh = max(self._cwnd // 2, 4 * USERDATA_MAX_LENGTH)
self._cwnd = self._ssthresh
self._partial_bytes_acked = 0
self._fast_recovery_exit = self._sent_queue[-1].tsn
self._fast_recovery_transmit = True
elif uint32_gte(chunk.cumulative_tsn, self._fast_recovery_exit):
self._fast_recovery_exit = None
if not self._sent_queue:
# there is no outstanding data, stop T3
self._t3_cancel()
elif done:
# the earliest outstanding chunk was acknowledged, restart T3
self._t3_restart()
self._update_advanced_peer_ack_point()
await self._data_channel_flush()
await self._transmit()
|
def wait_one(self):
"""Waits until this worker has finished one work item or died."""
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return
|
Waits until this worker has finished one work item or died.
|
Below is the the instruction that describes the task:
### Input:
Waits until this worker has finished one work item or died.
### Response:
def wait_one(self):
"""Waits until this worker has finished one work item or died."""
while True:
try:
item = self.output_queue.get(True, self.polltime)
except Queue.Empty:
continue
except KeyboardInterrupt:
LOGGER.debug('Exiting')
return
else:
item.check_result()
return
|
def get_contacts(address_books, query, method="all", reverse=False,
group=False, sort="first_name"):
"""Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type quer: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
"""
# Search for the contacts in all address books.
contacts = []
for address_book in address_books:
contacts.extend(address_book.search(query, method=method))
# Sort the contacts.
if group:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_first_name_last_name()).lower()))
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_last_name_first_name()).lower()))
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
else:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_first_name_last_name()).lower())
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_last_name_first_name()).lower())
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
|
Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type quer: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
|
Below is the the instruction that describes the task:
### Input:
Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type quer: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
### Response:
def get_contacts(address_books, query, method="all", reverse=False,
group=False, sort="first_name"):
"""Get a list of contacts from one or more address books.
:param address_books: the address books to search
:type address_books: list(address_book.AddressBook)
:param query: a search query to select contacts
:type quer: str
:param method: the search method, one of "all", "name" or "uid"
:type method: str
:param reverse: reverse the order of the returned contacts
:type reverse: bool
:param group: group results by address book
:type group: bool
:param sort: the field to use for sorting, one of "first_name", "last_name"
:type sort: str
:returns: contacts from the address_books that match the query
:rtype: list(CarddavObject)
"""
# Search for the contacts in all address books.
contacts = []
for address_book in address_books:
contacts.extend(address_book.search(query, method=method))
# Sort the contacts.
if group:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_first_name_last_name()).lower()))
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x: (
unidecode(x.address_book.name).lower(),
unidecode(x.get_last_name_first_name()).lower()))
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
else:
if sort == "first_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_first_name_last_name()).lower())
elif sort == "last_name":
return sorted(contacts, reverse=reverse, key=lambda x:
unidecode(x.get_last_name_first_name()).lower())
else:
raise ValueError('sort must be "first_name" or "last_name" not '
'{}.'.format(sort))
|
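Editor's note: the sorting in the get_contacts record above boils down to a unidecode-then-lowercase key. This isolated sketch shows just that key function on plain strings, so no address-book or CarddavObject machinery is needed; the names are invented.

from unidecode import unidecode

names = ['Émile Zola', 'anna bell', 'Øyvind Berg']
print(sorted(names, key=lambda n: unidecode(n).lower()))
# ['anna bell', 'Émile Zola', 'Øyvind Berg']  (accent- and case-insensitive order)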
def __demodulate_data(self, data):
"""
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
"""
if len(data) == 0:
return
power_spectrum = data.real ** 2 + data.imag ** 2
is_above_noise = np.sqrt(np.mean(power_spectrum)) > self.signal.noise_threshold
if self.adaptive_noise and not is_above_noise:
self.signal.noise_threshold = 0.9 * self.signal.noise_threshold + 0.1 * np.sqrt(np.max(power_spectrum))
if is_above_noise:
self.__add_to_buffer(data)
self.pause_length = 0
if not self.__buffer_is_full():
return
else:
self.pause_length += len(data)
if self.pause_length < 10 * self.signal.bit_len:
self.__add_to_buffer(data)
if not self.__buffer_is_full():
return
if self.__current_buffer_index == 0:
return
# clear cache and start a new message
self.signal._fulldata = self.__buffer[0:self.__current_buffer_index]
self.__clear_buffer()
self.signal._qad = None
bit_len = self.signal.bit_len
if self.automatic_center:
self.signal.qad_center = AutoInterpretation.detect_center(self.signal.qad, max_size=150*self.signal.bit_len)
ppseq = grab_pulse_lens(self.signal.qad, self.signal.qad_center,
self.signal.tolerance, self.signal.modulation_type, self.signal.bit_len)
bit_data, pauses, bit_sample_pos = self._ppseq_to_bits(ppseq, bit_len, write_bit_sample_pos=False)
for bits, pause in zip(bit_data, pauses):
message = Message(bits, pause, bit_len=bit_len, message_type=self.default_message_type,
decoder=self.decoder)
self.messages.append(message)
self.message_sniffed.emit(len(self.messages) - 1)
|
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
|
Below is the the instruction that describes the task:
### Input:
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
### Response:
def __demodulate_data(self, data):
"""
Demodulates received IQ data and adds demodulated bits to messages
:param data:
:return:
"""
if len(data) == 0:
return
power_spectrum = data.real ** 2 + data.imag ** 2
is_above_noise = np.sqrt(np.mean(power_spectrum)) > self.signal.noise_threshold
if self.adaptive_noise and not is_above_noise:
self.signal.noise_threshold = 0.9 * self.signal.noise_threshold + 0.1 * np.sqrt(np.max(power_spectrum))
if is_above_noise:
self.__add_to_buffer(data)
self.pause_length = 0
if not self.__buffer_is_full():
return
else:
self.pause_length += len(data)
if self.pause_length < 10 * self.signal.bit_len:
self.__add_to_buffer(data)
if not self.__buffer_is_full():
return
if self.__current_buffer_index == 0:
return
# clear cache and start a new message
self.signal._fulldata = self.__buffer[0:self.__current_buffer_index]
self.__clear_buffer()
self.signal._qad = None
bit_len = self.signal.bit_len
if self.automatic_center:
self.signal.qad_center = AutoInterpretation.detect_center(self.signal.qad, max_size=150*self.signal.bit_len)
ppseq = grab_pulse_lens(self.signal.qad, self.signal.qad_center,
self.signal.tolerance, self.signal.modulation_type, self.signal.bit_len)
bit_data, pauses, bit_sample_pos = self._ppseq_to_bits(ppseq, bit_len, write_bit_sample_pos=False)
for bits, pause in zip(bit_data, pauses):
message = Message(bits, pause, bit_len=bit_len, message_type=self.default_message_type,
decoder=self.decoder)
self.messages.append(message)
self.message_sniffed.emit(len(self.messages) - 1)
|
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
dest = src
|
perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
|
Below is the the instruction that describes the task:
### Input:
perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
### Response:
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: pycuda.driver.DeviceAllocation
:param src: A numpy array in host memory to store the data
:type src: numpy.ndarray
"""
if isinstance(dest, drv.DeviceAllocation):
drv.memcpy_htod(dest, src)
else:
dest = src
|
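Editor's note: a minimal PyCUDA sketch of the context the memcpy_htod record above assumes: allocate device memory, copy a NumPy array up, then copy it back to verify. It needs a CUDA-capable GPU and PyCUDA installed; otherwise treat it as illustration only.

import numpy as np
import pycuda.autoinit          # creates a context on the default device
import pycuda.driver as drv

host = np.arange(16, dtype=np.float32)
dev = drv.mem_alloc(host.nbytes)        # a DeviceAllocation, as checked in the record
drv.memcpy_htod(dev, host)              # host -> device
out = np.empty_like(host)
drv.memcpy_dtoh(out, dev)               # device -> host, for verification
assert (out == host).all()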
def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset]
|
Returns collector path
servers.host.cpu.total.idle
return "cpu"
|
Below is the the instruction that describes the task:
### Input:
Returns collector path
servers.host.cpu.total.idle
return "cpu"
### Response:
def getCollectorPath(self):
"""
Returns collector path
servers.host.cpu.total.idle
return "cpu"
"""
# If we don't have a host name, assume it's just the third part of the
# metric path
if self.host is None:
return self.path.split('.')[2]
offset = self.path.index(self.host)
offset += len(self.host) + 1
endoffset = self.path.index('.', offset)
return self.path[offset:endoffset]
|
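Editor's note: a standalone illustration of the string slicing performed by the getCollectorPath record above. Given a metric path of the form servers.<host>.<collector>.<rest>, it extracts the collector segment; the example path and host are made up.

path = 'servers.web01.cpu.total.idle'
host = 'web01'

offset = path.index(host) + len(host) + 1
endoffset = path.index('.', offset)
print(path[offset:endoffset])   # -> 'cpu'
print(path.split('.')[2])       # fallback used when the host is unknown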