Unnamed: 0 (int64, 0-10k) | repository_name (stringlengths 7-54) | func_path_in_repository (stringlengths 5-223) | func_name (stringlengths 1-134) | whole_func_string (stringlengths 100-30.3k) | language (stringclasses 1) | func_code_string (stringlengths 100-30.3k) | func_code_tokens (stringlengths 138-33.2k) | func_documentation_string (stringlengths 1-15k) | func_documentation_tokens (stringlengths 5-5.14k) | split_name (stringclasses 1) | func_code_url (stringlengths 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
6,700 | rhayes777/PyAutoFit | autofit/mapper/model_mapper.py | ModelMapper.prior_prior_model_name_dict | def prior_prior_model_name_dict(self):
"""
Returns
-------
prior_prior_model_name_dict: {Prior: str}
A dictionary mapping priors to the names of associated prior models. Each prior will only have one prior
model name; if a prior is shared by two prior models then one of those prior model's names will be in this
dictionary.
"""
prior_prior_model_name_dict = {prior_tuple.prior: prior_model_tuple.name
for prior_model_tuple in self.flat_prior_model_tuples
for prior_tuple in prior_model_tuple.prior_model.prior_tuples}
prior_list_prior_model_name_dict = {
prior_tuple.value: "{}_{}".format(list_prior_model_tuple.name, label_prior_model_tuple.name) for
list_prior_model_tuple in self.list_prior_model_tuples for label_prior_model_tuple in
list_prior_model_tuple.value.label_prior_model_tuples for prior_tuple in
label_prior_model_tuple.value.prior_tuples}
prior_prior_model_name_dict.update(prior_list_prior_model_name_dict)
return prior_prior_model_name_dict | python | def prior_prior_model_name_dict(self):
"""
Returns
-------
prior_prior_model_name_dict: {Prior: str}
A dictionary mapping priors to the names of associated prior models. Each prior will only have one prior
model name; if a prior is shared by two prior models then one of those prior model's names will be in this
dictionary.
"""
prior_prior_model_name_dict = {prior_tuple.prior: prior_model_tuple.name
for prior_model_tuple in self.flat_prior_model_tuples
for prior_tuple in prior_model_tuple.prior_model.prior_tuples}
prior_list_prior_model_name_dict = {
prior_tuple.value: "{}_{}".format(list_prior_model_tuple.name, label_prior_model_tuple.name) for
list_prior_model_tuple in self.list_prior_model_tuples for label_prior_model_tuple in
list_prior_model_tuple.value.label_prior_model_tuples for prior_tuple in
label_prior_model_tuple.value.prior_tuples}
prior_prior_model_name_dict.update(prior_list_prior_model_name_dict)
return prior_prior_model_name_dict | ['def', 'prior_prior_model_name_dict', '(', 'self', ')', ':', 'prior_prior_model_name_dict', '=', '{', 'prior_tuple', '.', 'prior', ':', 'prior_model_tuple', '.', 'name', 'for', 'prior_model_tuple', 'in', 'self', '.', 'flat_prior_model_tuples', 'for', 'prior_tuple', 'in', 'prior_model_tuple', '.', 'prior_model', '.', 'prior_tuples', '}', 'prior_list_prior_model_name_dict', '=', '{', 'prior_tuple', '.', 'value', ':', '"{}_{}"', '.', 'format', '(', 'list_prior_model_tuple', '.', 'name', ',', 'label_prior_model_tuple', '.', 'name', ')', 'for', 'list_prior_model_tuple', 'in', 'self', '.', 'list_prior_model_tuples', 'for', 'label_prior_model_tuple', 'in', 'list_prior_model_tuple', '.', 'value', '.', 'label_prior_model_tuples', 'for', 'prior_tuple', 'in', 'label_prior_model_tuple', '.', 'value', '.', 'prior_tuples', '}', 'prior_prior_model_name_dict', '.', 'update', '(', 'prior_list_prior_model_name_dict', ')', 'return', 'prior_prior_model_name_dict'] | Returns
-------
prior_prior_model_name_dict: {Prior: str}
A dictionary mapping priors to the names of associated prior models. Each prior will only have one prior
model name; if a prior is shared by two prior models then one of those prior model's names will be in this
dictionary. | ['Returns', '-------', 'prior_prior_model_name_dict', ':', '{', 'Prior', ':', 'str', '}', 'A', 'dictionary', 'mapping', 'priors', 'to', 'the', 'names', 'of', 'associated', 'prior', 'models', '.', 'Each', 'prior', 'will', 'only', 'have', 'one', 'prior', 'model', 'name', ';', 'if', 'a', 'prior', 'is', 'shared', 'by', 'two', 'prior', 'models', 'then', 'one', 'of', 'those', 'prior', 'model', 's', 'names', 'will', 'be', 'in', 'this', 'dictionary', '.'] | train | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/model_mapper.py#L205-L223 |
6,701 | Iotic-Labs/py-IoticAgent | src/IoticAgent/IOT/Point.py | Point.get_meta | def get_meta(self):
"""Get the metadata object for this Point
Returns a [PointMeta](PointMeta.m.html#IoticAgent.IOT.PointMeta.PointMeta) object - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
rdf = self.get_meta_rdf(fmt='n3')
return PointMeta(self, rdf, self._client.default_lang, fmt='n3') | python | def get_meta(self):
"""Get the metadata object for this Point
Returns a [PointMeta](PointMeta.m.html#IoticAgent.IOT.PointMeta.PointMeta) object - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
rdf = self.get_meta_rdf(fmt='n3')
return PointMeta(self, rdf, self._client.default_lang, fmt='n3') | ['def', 'get_meta', '(', 'self', ')', ':', 'rdf', '=', 'self', '.', 'get_meta_rdf', '(', 'fmt', '=', "'n3'", ')', 'return', 'PointMeta', '(', 'self', ',', 'rdf', ',', 'self', '.', '_client', '.', 'default_lang', ',', 'fmt', '=', "'n3'", ')'] | Get the metadata object for this Point
Returns a [PointMeta](PointMeta.m.html#IoticAgent.IOT.PointMeta.PointMeta) object - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure | ['Get', 'the', 'metadata', 'object', 'for', 'this', 'Point'] | train | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Point.py#L149-L161 |
6,702 | allenai/allennlp | allennlp/modules/token_embedders/embedding.py | _read_embeddings_from_text_file | def _read_embeddings_from_text_file(file_uri: str,
embedding_dim: int,
vocab: Vocabulary,
namespace: str = "tokens") -> torch.FloatTensor:
"""
Read pre-trained word vectors from an eventually compressed text file, possibly contained
inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than ``embedding_dim`` raise a warning and are skipped.
The remainder of the docstring is identical to ``_read_pretrained_embeddings_file``.
"""
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
vocab_size = vocab.get_vocab_size(namespace)
embeddings = {}
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading pretrained embeddings from file")
with EmbeddingsTextFile(file_uri) as embeddings_file:
for line in Tqdm.tqdm(embeddings_file):
token = line.split(' ', 1)[0]
if token in tokens_to_keep:
fields = line.rstrip().split(' ')
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning("Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim, len(fields) - 1, line)
continue
vector = numpy.asarray(fields[1:], dtype='float32')
embeddings[token] = vector
if not embeddings:
raise ConfigurationError("No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary")
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
embeddings_std)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug("Token %s was not found in the embedding file. Initialising randomly.", token)
logger.info("Pretrained embeddings were found for %d out of %d tokens",
num_tokens_found, vocab_size)
return embedding_matrix | python | def _read_embeddings_from_text_file(file_uri: str,
embedding_dim: int,
vocab: Vocabulary,
namespace: str = "tokens") -> torch.FloatTensor:
"""
Read pre-trained word vectors from an eventually compressed text file, possibly contained
inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than ``embedding_dim`` raise a warning and are skipped.
The remainder of the docstring is identical to ``_read_pretrained_embeddings_file``.
"""
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
vocab_size = vocab.get_vocab_size(namespace)
embeddings = {}
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading pretrained embeddings from file")
with EmbeddingsTextFile(file_uri) as embeddings_file:
for line in Tqdm.tqdm(embeddings_file):
token = line.split(' ', 1)[0]
if token in tokens_to_keep:
fields = line.rstrip().split(' ')
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning("Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim, len(fields) - 1, line)
continue
vector = numpy.asarray(fields[1:], dtype='float32')
embeddings[token] = vector
if not embeddings:
raise ConfigurationError("No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary")
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
embeddings_std)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug("Token %s was not found in the embedding file. Initialising randomly.", token)
logger.info("Pretrained embeddings were found for %d out of %d tokens",
num_tokens_found, vocab_size)
return embedding_matrix | ['def', '_read_embeddings_from_text_file', '(', 'file_uri', ':', 'str', ',', 'embedding_dim', ':', 'int', ',', 'vocab', ':', 'Vocabulary', ',', 'namespace', ':', 'str', '=', '"tokens"', ')', '->', 'torch', '.', 'FloatTensor', ':', 'tokens_to_keep', '=', 'set', '(', 'vocab', '.', 'get_index_to_token_vocabulary', '(', 'namespace', ')', '.', 'values', '(', ')', ')', 'vocab_size', '=', 'vocab', '.', 'get_vocab_size', '(', 'namespace', ')', 'embeddings', '=', '{', '}', '# First we read the embeddings from the file, only keeping vectors for the words we need.', 'logger', '.', 'info', '(', '"Reading pretrained embeddings from file"', ')', 'with', 'EmbeddingsTextFile', '(', 'file_uri', ')', 'as', 'embeddings_file', ':', 'for', 'line', 'in', 'Tqdm', '.', 'tqdm', '(', 'embeddings_file', ')', ':', 'token', '=', 'line', '.', 'split', '(', "' '", ',', '1', ')', '[', '0', ']', 'if', 'token', 'in', 'tokens_to_keep', ':', 'fields', '=', 'line', '.', 'rstrip', '(', ')', '.', 'split', '(', "' '", ')', 'if', 'len', '(', 'fields', ')', '-', '1', '!=', 'embedding_dim', ':', '# Sometimes there are funny unicode parsing problems that lead to different', '# fields lengths (e.g., a word with a unicode space character that splits', '# into more than one column). We skip those lines. Note that if you have', '# some kind of long header, this could result in all of your lines getting', "# skipped. It's hard to check for that here; you just have to look in the", '# embedding_misses_file and at the model summary to make sure things look', '# like they are supposed to.', 'logger', '.', 'warning', '(', '"Found line with wrong number of dimensions (expected: %d; actual: %d): %s"', ',', 'embedding_dim', ',', 'len', '(', 'fields', ')', '-', '1', ',', 'line', ')', 'continue', 'vector', '=', 'numpy', '.', 'asarray', '(', 'fields', '[', '1', ':', ']', ',', 'dtype', '=', "'float32'", ')', 'embeddings', '[', 'token', ']', '=', 'vector', 'if', 'not', 'embeddings', ':', 'raise', 'ConfigurationError', '(', '"No embeddings of correct dimension found; you probably "', '"misspecified your embedding_dim parameter, or didn\'t "', '"pre-populate your Vocabulary"', ')', 'all_embeddings', '=', 'numpy', '.', 'asarray', '(', 'list', '(', 'embeddings', '.', 'values', '(', ')', ')', ')', 'embeddings_mean', '=', 'float', '(', 'numpy', '.', 'mean', '(', 'all_embeddings', ')', ')', 'embeddings_std', '=', 'float', '(', 'numpy', '.', 'std', '(', 'all_embeddings', ')', ')', '# Now we initialize the weight matrix for an embedding layer, starting with random vectors,', '# then filling in the word vectors we just read.', 'logger', '.', 'info', '(', '"Initializing pre-trained embedding layer"', ')', 'embedding_matrix', '=', 'torch', '.', 'FloatTensor', '(', 'vocab_size', ',', 'embedding_dim', ')', '.', 'normal_', '(', 'embeddings_mean', ',', 'embeddings_std', ')', 'num_tokens_found', '=', '0', 'index_to_token', '=', 'vocab', '.', 'get_index_to_token_vocabulary', '(', 'namespace', ')', 'for', 'i', 'in', 'range', '(', 'vocab_size', ')', ':', 'token', '=', 'index_to_token', '[', 'i', ']', "# If we don't have a pre-trained vector for this word, we'll just leave this row alone,", '# so the word has a random initialization.', 'if', 'token', 'in', 'embeddings', ':', 'embedding_matrix', '[', 'i', ']', '=', 'torch', '.', 'FloatTensor', '(', 'embeddings', '[', 'token', ']', ')', 'num_tokens_found', '+=', '1', 'else', ':', 'logger', '.', 'debug', '(', '"Token %s was not found in the embedding file. 
Initialising randomly."', ',', 'token', ')', 'logger', '.', 'info', '(', '"Pretrained embeddings were found for %d out of %d tokens"', ',', 'num_tokens_found', ',', 'vocab_size', ')', 'return', 'embedding_matrix'] | Read pre-trained word vectors from an eventually compressed text file, possibly contained
inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
Lines that contain more numerical tokens than ``embedding_dim`` raise a warning and are skipped.
The remainder of the docstring is identical to ``_read_pretrained_embeddings_file``. | ['Read', 'pre', '-', 'trained', 'word', 'vectors', 'from', 'an', 'eventually', 'compressed', 'text', 'file', 'possibly', 'contained', 'inside', 'an', 'archive', 'with', 'multiple', 'files', '.', 'The', 'text', 'file', 'is', 'assumed', 'to', 'be', 'utf', '-', '8', 'encoded', 'with', 'space', '-', 'separated', 'fields', ':', '[', 'word', ']', '[', 'dim', '1', ']', '[', 'dim', '2', ']', '...'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/token_embedders/embedding.py#L374-L443 |
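The function above assumes a UTF-8 text file with one token per line followed by space-separated floats. Below is a small self-contained sketch of just the per-line parsing it performs (split on a single space, dimension check, float32 conversion); the file contents and the 3-dimensional vectors are made-up illustration values, not part of the original code.

```python
# Minimal sketch of the per-line parsing used above; the lines and the
# 3-dimensional vectors are hypothetical illustration values.
import numpy

embedding_dim = 3
lines = [
    "the 0.1 0.2 0.3",
    "cat 0.4 0.5 0.6",
    "broken 0.7 0.8",           # wrong dimensionality -> skipped (real code logs a warning)
]
embeddings = {}
for line in lines:
    fields = line.rstrip().split(' ')
    if len(fields) - 1 != embedding_dim:
        continue
    embeddings[fields[0]] = numpy.asarray(fields[1:], dtype='float32')

print(embeddings['cat'])        # -> [0.4 0.5 0.6]
```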
6,703 | fuzeman/PyUPnP | pyupnp/lict.py | Lict.setdefault | def setdefault(self, k, d=None):
""" D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """
if k not in self._col_dict:
self._set_key(k, d)
return self._col_dict.get(k) | python | def setdefault(self, k, d=None):
""" D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """
if k not in self._col_dict:
self._set_key(k, d)
return self._col_dict.get(k) | ['def', 'setdefault', '(', 'self', ',', 'k', ',', 'd', '=', 'None', ')', ':', 'if', 'k', 'not', 'in', 'self', '.', '_col_dict', ':', 'self', '.', '_set_key', '(', 'k', ',', 'd', ')', 'return', 'self', '.', '_col_dict', '.', 'get', '(', 'k', ')'] | D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D | ['D', '.', 'setdefault', '(', 'k', '[', 'd', ']', ')', '-', '>', 'D', '.', 'get', '(', 'k', 'd', ')', 'also', 'set', 'D', '[', 'k', ']', '=', 'd', 'if', 'k', 'not', 'in', 'D'] | train | https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L337-L341 |
6,704 | horazont/aioxmpp | aioxmpp/xso/model.py | Attr.handle_missing | def handle_missing(self, instance, ctx):
"""
Handle a missing attribute on `instance`. This is called whenever no
value for the attribute is found during parsing. The call to
:meth:`missing` is independent of the value of `required`.
If the `missing` callback is not :data:`None`, it is called with the
`instance` and the `ctx` as arguments. If the returned value is not
:data:`None`, it is used as the value of the attribute (validation
takes place as if the value had been set from the code, not as if the
value had been received from XML) and the handler returns.
If the `missing` callback is :data:`None` or returns :data:`None`, the
handling continues as normal: if `required` is true, a
:class:`ValueError` is raised.
"""
if self.missing is not None:
value = self.missing(instance, ctx)
if value is not None:
self._set_from_code(instance, value)
return
if self.default is _PropBase.NO_DEFAULT:
raise ValueError("missing attribute {} on {}".format(
tag_to_str(self.tag),
tag_to_str(instance.TAG),
)) | python | def handle_missing(self, instance, ctx):
"""
Handle a missing attribute on `instance`. This is called whenever no
value for the attribute is found during parsing. The call to
:meth:`missing` is independent of the value of `required`.
If the `missing` callback is not :data:`None`, it is called with the
`instance` and the `ctx` as arguments. If the returned value is not
:data:`None`, it is used as the value of the attribute (validation
takes place as if the value had been set from the code, not as if the
value had been received from XML) and the handler returns.
If the `missing` callback is :data:`None` or returns :data:`None`, the
handling continues as normal: if `required` is true, a
:class:`ValueError` is raised.
"""
if self.missing is not None:
value = self.missing(instance, ctx)
if value is not None:
self._set_from_code(instance, value)
return
if self.default is _PropBase.NO_DEFAULT:
raise ValueError("missing attribute {} on {}".format(
tag_to_str(self.tag),
tag_to_str(instance.TAG),
)) | ['def', 'handle_missing', '(', 'self', ',', 'instance', ',', 'ctx', ')', ':', 'if', 'self', '.', 'missing', 'is', 'not', 'None', ':', 'value', '=', 'self', '.', 'missing', '(', 'instance', ',', 'ctx', ')', 'if', 'value', 'is', 'not', 'None', ':', 'self', '.', '_set_from_code', '(', 'instance', ',', 'value', ')', 'return', 'if', 'self', '.', 'default', 'is', '_PropBase', '.', 'NO_DEFAULT', ':', 'raise', 'ValueError', '(', '"missing attribute {} on {}"', '.', 'format', '(', 'tag_to_str', '(', 'self', '.', 'tag', ')', ',', 'tag_to_str', '(', 'instance', '.', 'TAG', ')', ',', ')', ')'] | Handle a missing attribute on `instance`. This is called whenever no
value for the attribute is found during parsing. The call to
:meth:`missing` is independent of the value of `required`.
If the `missing` callback is not :data:`None`, it is called with the
`instance` and the `ctx` as arguments. If the returned value is not
:data:`None`, it is used as the value of the attribute (validation
takes place as if the value had been set from the code, not as if the
value had been received from XML) and the handler returns.
If the `missing` callback is :data:`None` or returns :data:`None`, the
handling continues as normal: if `required` is true, a
:class:`ValueError` is raised. | ['Handle', 'a', 'missing', 'attribute', 'on', 'instance', '.', 'This', 'is', 'called', 'whenever', 'no', 'value', 'for', 'the', 'attribute', 'is', 'found', 'during', 'parsing', '.', 'The', 'call', 'to', ':', 'meth', ':', 'missing', 'is', 'independent', 'of', 'the', 'value', 'of', 'required', '.'] | train | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/xso/model.py#L812-L838 |
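For context, a hedged sketch of how a `missing` callback is attached to an attribute descriptor. `xso.XSO` and `xso.Attr` are part of aioxmpp, and the `(instance, ctx)` callback signature comes from the docstring above; the namespace, class name, and callback body are illustrative assumptions.

```python
# Illustrative only: the TAG namespace, class name and callback logic are
# assumptions; the (instance, ctx) callback contract is taken from the
# handle_missing docstring above.
import aioxmpp.xso as xso

def fall_back_lang(instance, ctx):
    # Returning a non-None value fills in the missing attribute; returning
    # None lets the normal required/default handling run.
    return getattr(ctx, "lang", None)

class Item(xso.XSO):
    TAG = ("urn:example:items", "item")

    lang = xso.Attr("lang", missing=fall_back_lang, default=None)
```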
6,705 | Metatab/tableintuit | tableintuit/stats.py | Stats.run | def run(self):
""" Run the stats. The source must yield Row proxies.
"""
self._func, self._func_code = self.build()
def process_row(row):
try:
self._func(self._stats, row)
except TypeError as e:
raise TypeError("Failed for '{}'; {}".format(self._func_code, e))
except KeyError:
raise KeyError(
'Failed to find key in row. headers = "{}", code = "{}" '
.format(list(row.keys()), self._func_code))
except Exception as e:
raise type(e)(
'General exception in stats. headers = "{}", code = "{}": {} '
.format(list(row.keys()), self._func_code, e))
# Use all of the rows in the source
if self._sample_size is None:
for i, row in enumerate(self._source):
process_row(row)
# Use a sample of rows, evenly distributed though the source
else:
skip_rate = self._sample_size / self._n_rows
i = 0
skip = skip_rate
for j, row in enumerate(self._source):
skip += skip_rate
if skip >= 1:
skip -= 1
i += 1
process_row(row)
if i < 5000: # Since the hist bins aren't built until 5K row
for k, v in self._stats.items():
v._build_hist_bins()
return self | python | def run(self):
""" Run the stats. The source must yield Row proxies.
"""
self._func, self._func_code = self.build()
def process_row(row):
try:
self._func(self._stats, row)
except TypeError as e:
raise TypeError("Failed for '{}'; {}".format(self._func_code, e))
except KeyError:
raise KeyError(
'Failed to find key in row. headers = "{}", code = "{}" '
.format(list(row.keys()), self._func_code))
except Exception as e:
raise type(e)(
'General exception in stats. headers = "{}", code = "{}": {} '
.format(list(row.keys()), self._func_code, e))
# Use all of the rows in the source
if self._sample_size is None:
for i, row in enumerate(self._source):
process_row(row)
# Use a sample of rows, evenly distributed though the source
else:
skip_rate = self._sample_size / self._n_rows
i = 0
skip = skip_rate
for j, row in enumerate(self._source):
skip += skip_rate
if skip >= 1:
skip -= 1
i += 1
process_row(row)
if i < 5000: # Since the hist bins aren't built until 5K row
for k, v in self._stats.items():
v._build_hist_bins()
return self | ['def', 'run', '(', 'self', ')', ':', 'self', '.', '_func', ',', 'self', '.', '_func_code', '=', 'self', '.', 'build', '(', ')', 'def', 'process_row', '(', 'row', ')', ':', 'try', ':', 'self', '.', '_func', '(', 'self', '.', '_stats', ',', 'row', ')', 'except', 'TypeError', 'as', 'e', ':', 'raise', 'TypeError', '(', '"Failed for \'{}\'; {}"', '.', 'format', '(', 'self', '.', '_func_code', ',', 'e', ')', ')', 'except', 'KeyError', ':', 'raise', 'KeyError', '(', '\'Failed to find key in row. headers = "{}", code = "{}" \'', '.', 'format', '(', 'list', '(', 'row', '.', 'keys', '(', ')', ')', ',', 'self', '.', '_func_code', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'type', '(', 'e', ')', '(', '\'General exception in stats. headers = "{}", code = "{}": {} \'', '.', 'format', '(', 'list', '(', 'row', '.', 'keys', '(', ')', ')', ',', 'self', '.', '_func_code', ',', 'e', ')', ')', '# Use all of the rows in the source', 'if', 'self', '.', '_sample_size', 'is', 'None', ':', 'for', 'i', ',', 'row', 'in', 'enumerate', '(', 'self', '.', '_source', ')', ':', 'process_row', '(', 'row', ')', '# Use a sample of rows, evenly distributed though the source', 'else', ':', 'skip_rate', '=', 'self', '.', '_sample_size', '/', 'self', '.', '_n_rows', 'i', '=', '0', 'skip', '=', 'skip_rate', 'for', 'j', ',', 'row', 'in', 'enumerate', '(', 'self', '.', '_source', ')', ':', 'skip', '+=', 'skip_rate', 'if', 'skip', '>=', '1', ':', 'skip', '-=', '1', 'i', '+=', '1', 'process_row', '(', 'row', ')', 'if', 'i', '<', '5000', ':', "# Since the hist bins aren't built until 5K row", 'for', 'k', ',', 'v', 'in', 'self', '.', '_stats', '.', 'items', '(', ')', ':', 'v', '.', '_build_hist_bins', '(', ')', 'return', 'self'] | Run the stats. The source must yield Row proxies. | ['Run', 'the', 'stats', '.', 'The', 'source', 'must', 'yield', 'Row', 'proxies', '.'] | train | https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/stats.py#L427-L470 |
6,706 | stevearc/dql | dql/engine.py | Engine._explain | def _explain(self, tree):
""" Set up the engine to do a dry run of a query """
self._explaining = True
self._call_list = []
old_call = self.connection.call
def fake_call(command, **kwargs):
""" Replacement for connection.call that logs args """
if command == "describe_table":
return old_call(command, **kwargs)
self._call_list.append((command, kwargs))
raise ExplainSignal
self.connection.call = fake_call
try:
ret = self._run(tree[1])
try:
list(ret)
except TypeError:
pass
finally:
self.connection.call = old_call
self._explaining = False | python | def _explain(self, tree):
""" Set up the engine to do a dry run of a query """
self._explaining = True
self._call_list = []
old_call = self.connection.call
def fake_call(command, **kwargs):
""" Replacement for connection.call that logs args """
if command == "describe_table":
return old_call(command, **kwargs)
self._call_list.append((command, kwargs))
raise ExplainSignal
self.connection.call = fake_call
try:
ret = self._run(tree[1])
try:
list(ret)
except TypeError:
pass
finally:
self.connection.call = old_call
self._explaining = False | ['def', '_explain', '(', 'self', ',', 'tree', ')', ':', 'self', '.', '_explaining', '=', 'True', 'self', '.', '_call_list', '=', '[', ']', 'old_call', '=', 'self', '.', 'connection', '.', 'call', 'def', 'fake_call', '(', 'command', ',', '*', '*', 'kwargs', ')', ':', '""" Replacement for connection.call that logs args """', 'if', 'command', '==', '"describe_table"', ':', 'return', 'old_call', '(', 'command', ',', '*', '*', 'kwargs', ')', 'self', '.', '_call_list', '.', 'append', '(', '(', 'command', ',', 'kwargs', ')', ')', 'raise', 'ExplainSignal', 'self', '.', 'connection', '.', 'call', '=', 'fake_call', 'try', ':', 'ret', '=', 'self', '.', '_run', '(', 'tree', '[', '1', ']', ')', 'try', ':', 'list', '(', 'ret', ')', 'except', 'TypeError', ':', 'pass', 'finally', ':', 'self', '.', 'connection', '.', 'call', '=', 'old_call', 'self', '.', '_explaining', '=', 'False'] | Set up the engine to do a dry run of a query | ['Set', 'up', 'the', 'engine', 'to', 'do', 'a', 'dry', 'run', 'of', 'a', 'query'] | train | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L380-L402 |
6,707 | saltstack/salt | salt/utils/dns.py | _lookup_drill | def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
'''
Use drill to lookup addresses
:param name: Name of record to search
:param rdtype: DNS record type
:param timeout: command return timeout
:param servers: [] of servers to use
:return: [] of records or False if error
'''
cmd = 'drill '
if secure:
cmd += '-D -o ad '
cmd += '{0} {1} '.format(rdtype, name)
if servers:
cmd += ''.join(['@{0} '.format(srv) for srv in servers])
cmd = __salt__['cmd.run_all'](
cmd, timeout=timeout,
python_shell=False, output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr'])
return False
lookup_res = iter(cmd['stdout'].splitlines())
validated = False
res = []
try:
line = ''
while 'ANSWER SECTION' not in line:
line = next(lookup_res)
while True:
line = next(lookup_res)
line = line.strip()
if not line or line.startswith(';;'):
break
l_type, l_rec = line.split(None, 4)[-2:]
if l_type == 'CNAME' and rdtype != 'CNAME':
continue
elif l_type == 'RRSIG':
validated = True
continue
elif l_type != rdtype:
raise ValueError('Invalid DNS type {}'.format(rdtype))
res.append(_data_clean(l_rec))
except StopIteration:
pass
if res and secure and not validated:
return False
else:
return res | python | def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
'''
Use drill to lookup addresses
:param name: Name of record to search
:param rdtype: DNS record type
:param timeout: command return timeout
:param servers: [] of servers to use
:return: [] of records or False if error
'''
cmd = 'drill '
if secure:
cmd += '-D -o ad '
cmd += '{0} {1} '.format(rdtype, name)
if servers:
cmd += ''.join(['@{0} '.format(srv) for srv in servers])
cmd = __salt__['cmd.run_all'](
cmd, timeout=timeout,
python_shell=False, output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr'])
return False
lookup_res = iter(cmd['stdout'].splitlines())
validated = False
res = []
try:
line = ''
while 'ANSWER SECTION' not in line:
line = next(lookup_res)
while True:
line = next(lookup_res)
line = line.strip()
if not line or line.startswith(';;'):
break
l_type, l_rec = line.split(None, 4)[-2:]
if l_type == 'CNAME' and rdtype != 'CNAME':
continue
elif l_type == 'RRSIG':
validated = True
continue
elif l_type != rdtype:
raise ValueError('Invalid DNS type {}'.format(rdtype))
res.append(_data_clean(l_rec))
except StopIteration:
pass
if res and secure and not validated:
return False
else:
return res | ['def', '_lookup_drill', '(', 'name', ',', 'rdtype', ',', 'timeout', '=', 'None', ',', 'servers', '=', 'None', ',', 'secure', '=', 'None', ')', ':', 'cmd', '=', "'drill '", 'if', 'secure', ':', 'cmd', '+=', "'-D -o ad '", 'cmd', '+=', "'{0} {1} '", '.', 'format', '(', 'rdtype', ',', 'name', ')', 'if', 'servers', ':', 'cmd', '+=', "''", '.', 'join', '(', '[', "'@{0} '", '.', 'format', '(', 'srv', ')', 'for', 'srv', 'in', 'servers', ']', ')', 'cmd', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', 'cmd', ',', 'timeout', '=', 'timeout', ',', 'python_shell', '=', 'False', ',', 'output_loglevel', '=', "'quiet'", ')', 'if', 'cmd', '[', "'retcode'", ']', '!=', '0', ':', 'log', '.', 'warning', '(', "'drill returned (%s): %s'", ',', 'cmd', '[', "'retcode'", ']', ',', 'cmd', '[', "'stderr'", ']', ')', 'return', 'False', 'lookup_res', '=', 'iter', '(', 'cmd', '[', "'stdout'", ']', '.', 'splitlines', '(', ')', ')', 'validated', '=', 'False', 'res', '=', '[', ']', 'try', ':', 'line', '=', "''", 'while', "'ANSWER SECTION'", 'not', 'in', 'line', ':', 'line', '=', 'next', '(', 'lookup_res', ')', 'while', 'True', ':', 'line', '=', 'next', '(', 'lookup_res', ')', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'not', 'line', 'or', 'line', '.', 'startswith', '(', "';;'", ')', ':', 'break', 'l_type', ',', 'l_rec', '=', 'line', '.', 'split', '(', 'None', ',', '4', ')', '[', '-', '2', ':', ']', 'if', 'l_type', '==', "'CNAME'", 'and', 'rdtype', '!=', "'CNAME'", ':', 'continue', 'elif', 'l_type', '==', "'RRSIG'", ':', 'validated', '=', 'True', 'continue', 'elif', 'l_type', '!=', 'rdtype', ':', 'raise', 'ValueError', '(', "'Invalid DNS type {}'", '.', 'format', '(', 'rdtype', ')', ')', 'res', '.', 'append', '(', '_data_clean', '(', 'l_rec', ')', ')', 'except', 'StopIteration', ':', 'pass', 'if', 'res', 'and', 'secure', 'and', 'not', 'validated', ':', 'return', 'False', 'else', ':', 'return', 'res'] | Use drill to lookup addresses
:param name: Name of record to search
:param rdtype: DNS record type
:param timeout: command return timeout
:param servers: [] of servers to use
:return: [] of records or False if error | ['Use', 'drill', 'to', 'lookup', 'addresses', ':', 'param', 'name', ':', 'Name', 'of', 'record', 'to', 'search', ':', 'param', 'rdtype', ':', 'DNS', 'record', 'type', ':', 'param', 'timeout', ':', 'command', 'return', 'timeout', ':', 'param', 'servers', ':', '[]', 'of', 'servers', 'to', 'use', ':', 'return', ':', '[]', 'of', 'records', 'or', 'False', 'if', 'error'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L322-L375 |
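To make the ANSWER SECTION handling above concrete, here is a standalone sketch that runs the same parsing steps over a canned piece of drill output; the records and the simplified handling (no CNAME check or data-clean step) are illustrative.

```python
# Canned drill-style output; the records below are illustrative.
canned = """\
;; ANSWER SECTION:
example.com. 300 IN A 93.184.216.34
example.com. 300 IN RRSIG A 8 2 300 ...

;; AUTHORITY SECTION:
"""
lookup_res = iter(canned.splitlines())
res, validated = [], False
line = ''
while 'ANSWER SECTION' not in line:
    line = next(lookup_res)
while True:
    line = next(lookup_res).strip()
    if not line or line.startswith(';;'):
        break
    l_type, l_rec = line.split(None, 4)[-2:]
    if l_type == 'RRSIG':
        validated = True        # DNSSEC signature seen
        continue
    res.append(l_rec)

print(res, validated)           # -> ['93.184.216.34'] True
```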
6,708 | rbarrois/mpdlcd | mpdlcd/mpdhooks.py | MPDHook.handle | def handle(self, client, subhooks=()):
"""Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
"""
new_data = self.fetch(client)
# Holds the list of updated fields.
updated = {}
if not subhooks:
# We always want to compare to previous values.
subhooks = [self.name]
for subhook in subhooks:
new_key = self.extract_key(new_data, subhook)
if new_key != self.previous_keys.get(subhook):
updated[subhook] = new_key
if updated:
logger.debug("Hook %s: data changed from %r to %r", self.name, self.previous_keys, updated)
self.previous_keys.update(updated)
return (True, new_data)
return (False, None) | python | def handle(self, client, subhooks=()):
"""Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
"""
new_data = self.fetch(client)
# Holds the list of updated fields.
updated = {}
if not subhooks:
# We always want to compare to previous values.
subhooks = [self.name]
for subhook in subhooks:
new_key = self.extract_key(new_data, subhook)
if new_key != self.previous_keys.get(subhook):
updated[subhook] = new_key
if updated:
logger.debug("Hook %s: data changed from %r to %r", self.name, self.previous_keys, updated)
self.previous_keys.update(updated)
return (True, new_data)
return (False, None) | ['def', 'handle', '(', 'self', ',', 'client', ',', 'subhooks', '=', '(', ')', ')', ':', 'new_data', '=', 'self', '.', 'fetch', '(', 'client', ')', '# Holds the list of updated fields.', 'updated', '=', '{', '}', 'if', 'not', 'subhooks', ':', '# We always want to compare to previous values.', 'subhooks', '=', '[', 'self', '.', 'name', ']', 'for', 'subhook', 'in', 'subhooks', ':', 'new_key', '=', 'self', '.', 'extract_key', '(', 'new_data', ',', 'subhook', ')', 'if', 'new_key', '!=', 'self', '.', 'previous_keys', '.', 'get', '(', 'subhook', ')', ':', 'updated', '[', 'subhook', ']', '=', 'new_key', 'if', 'updated', ':', 'logger', '.', 'debug', '(', '"Hook %s: data changed from %r to %r"', ',', 'self', '.', 'name', ',', 'self', '.', 'previous_keys', ',', 'updated', ')', 'self', '.', 'previous_keys', '.', 'update', '(', 'updated', ')', 'return', '(', 'True', ',', 'new_data', ')', 'return', '(', 'False', ',', 'None', ')'] | Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value. | ['Handle', 'a', 'new', 'update', '.'] | train | https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/mpdhooks.py#L68-L96 |
6,709 | XRDX/pyleap | pyleap/util.py | repeat | def repeat(f, dt=1/60):
""" 重复执行函数f,时间间隔dt """
stop(f)
pyglet.clock.schedule_interval(f, dt) | python | def repeat(f, dt=1/60):
""" 重复执行函数f,时间间隔dt """
stop(f)
pyglet.clock.schedule_interval(f, dt) | ['def', 'repeat', '(', 'f', ',', 'dt', '=', '1', '/', '60', ')', ':', 'stop', '(', 'f', ')', 'pyglet', '.', 'clock', '.', 'schedule_interval', '(', 'f', ',', 'dt', ')'] | 重复执行函数f,时间间隔dt | ['重复执行函数f,时间间隔dt'] | train | https://github.com/XRDX/pyleap/blob/234c722cfbe66814254ab0d8f67d16b0b774f4d5/pyleap/util.py#L13-L16 |
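A brief usage sketch for the helper above, assuming `repeat` and `stop` can be imported from `pyleap.util` as defined in that module; the callback and the 1/30 s interval are illustrative, and a running pyleap/pyglet event loop is still needed for the callback to actually fire.

```python
# Assumes repeat/stop are importable from pyleap.util as shown above;
# the callback and interval are illustrative.
from pyleap.util import repeat, stop

def update(dt):
    # dt is the elapsed time pyglet passes to scheduled callbacks
    print("tick", dt)

repeat(update, 1 / 30)   # re-registering the same function first unschedules it
# ... later ...
stop(update)
```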
6,710 | mezz64/pyEmby | pyemby/server.py | EmbyServer.api_request | async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None | python | async def api_request(self, url, params):
"""Make api fetch request."""
request = None
try:
with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop):
request = await self._api_session.get(
url, params=params)
if request.status != 200:
_LOGGER.error('Error fetching Emby data: %s', request.status)
return None
request_json = await request.json()
if 'error' in request_json:
_LOGGER.error('Error converting Emby data to json: %s: %s',
request_json['error']['code'],
request_json['error']['message'])
return None
return request_json
except (aiohttp.ClientError, asyncio.TimeoutError,
ConnectionRefusedError) as err:
_LOGGER.error('Error fetching Emby data: %s', err)
return None | ['async', 'def', 'api_request', '(', 'self', ',', 'url', ',', 'params', ')', ':', 'request', '=', 'None', 'try', ':', 'with', 'async_timeout', '.', 'timeout', '(', 'DEFAULT_TIMEOUT', ',', 'loop', '=', 'self', '.', '_event_loop', ')', ':', 'request', '=', 'await', 'self', '.', '_api_session', '.', 'get', '(', 'url', ',', 'params', '=', 'params', ')', 'if', 'request', '.', 'status', '!=', '200', ':', '_LOGGER', '.', 'error', '(', "'Error fetching Emby data: %s'", ',', 'request', '.', 'status', ')', 'return', 'None', 'request_json', '=', 'await', 'request', '.', 'json', '(', ')', 'if', "'error'", 'in', 'request_json', ':', '_LOGGER', '.', 'error', '(', "'Error converting Emby data to json: %s: %s'", ',', 'request_json', '[', "'error'", ']', '[', "'code'", ']', ',', 'request_json', '[', "'error'", ']', '[', "'message'", ']', ')', 'return', 'None', 'return', 'request_json', 'except', '(', 'aiohttp', '.', 'ClientError', ',', 'asyncio', '.', 'TimeoutError', ',', 'ConnectionRefusedError', ')', 'as', 'err', ':', '_LOGGER', '.', 'error', '(', "'Error fetching Emby data: %s'", ',', 'err', ')', 'return', 'None'] | Make api fetch request. | ['Make', 'api', 'fetch', 'request', '.'] | train | https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L232-L253 |
6,711 | myusuf3/delorean | delorean/interface.py | range_monthly | def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count) | python | def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count) | ['def', 'range_monthly', '(', 'start', '=', 'None', ',', 'stop', '=', 'None', ',', 'timezone', '=', "'UTC'", ',', 'count', '=', 'None', ')', ':', 'return', 'stops', '(', 'start', '=', 'start', ',', 'stop', '=', 'stop', ',', 'freq', '=', 'MONTHLY', ',', 'timezone', '=', 'timezone', ',', 'count', '=', 'count', ')'] | This an alternative way to generating sets of Delorean objects with
MONTHLY stops | ['This', 'an', 'alternative', 'way', 'to', 'generating', 'sets', 'of', 'Delorean', 'objects', 'with', 'MONTHLY', 'stops'] | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L124-L129 |
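A hedged usage sketch: it assumes `range_monthly` is re-exported at the `delorean` package top level (it is defined in `delorean/interface.py` as shown above); the count and timezone values are illustrative.

```python
# Assumes range_monthly is re-exported by the delorean package; the
# count and timezone are illustrative.
from delorean import range_monthly

# Three Delorean objects, one month apart, starting from "now" in UTC.
for d in range_monthly(count=3, timezone='UTC'):
    print(d.datetime)
```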
6,712 | suds-community/suds | suds/bindings/binding.py | Binding.envelope | def envelope(self, header, body):
"""
Build the B{<Envelope/>} for a SOAP outbound message.
@param header: The SOAP message B{header}.
@type header: L{Element}
@param body: The SOAP message B{body}.
@type body: L{Element}
@return: The SOAP envelope containing the body and header.
@rtype: L{Element}
"""
env = Element("Envelope", ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env | python | def envelope(self, header, body):
"""
Build the B{<Envelope/>} for a SOAP outbound message.
@param header: The SOAP message B{header}.
@type header: L{Element}
@param body: The SOAP message B{body}.
@type body: L{Element}
@return: The SOAP envelope containing the body and header.
@rtype: L{Element}
"""
env = Element("Envelope", ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env | ['def', 'envelope', '(', 'self', ',', 'header', ',', 'body', ')', ':', 'env', '=', 'Element', '(', '"Envelope"', ',', 'ns', '=', 'envns', ')', 'env', '.', 'addPrefix', '(', 'Namespace', '.', 'xsins', '[', '0', ']', ',', 'Namespace', '.', 'xsins', '[', '1', ']', ')', 'env', '.', 'append', '(', 'header', ')', 'env', '.', 'append', '(', 'body', ')', 'return', 'env'] | Build the B{<Envelope/>} for a SOAP outbound message.
@param header: The SOAP message B{header}.
@type header: L{Element}
@param body: The SOAP message B{body}.
@type body: L{Element}
@return: The SOAP envelope containing the body and header.
@rtype: L{Element} | ['Build', 'the', 'B', '{', '<Envelope', '/', '>', '}', 'for', 'a', 'SOAP', 'outbound', 'message', '.'] | train | https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/bindings/binding.py#L267-L283 |
6,713 | DistrictDataLabs/yellowbrick | yellowbrick/model_selection/validation_curve.py | ValidationCurve.fit | def fit(self, X, y=None):
"""
Fits the validation curve with the wrapped estimator and parameter
array to the specified data. Draws training and test score curves and
saves the scores to the visualizer.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
Returns the instance of the validation curve visualizer for use in
pipelines and other sequential transformers.
"""
# arguments to pass to sk_validation_curve
skvc_kwargs = {
key: self.get_params()[key]
for key in (
'param_name', 'param_range', 'groups', 'cv', 'scoring',
'n_jobs', 'pre_dispatch',
)
}
# compute the validation curve and store scores
curve = sk_validation_curve(self.estimator, X, y, **skvc_kwargs)
self.train_scores_, self.test_scores_ = curve
# compute the mean and standard deviation of the training data
self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
self.train_scores_std_ = np.std(self.train_scores_, axis=1)
# compute the mean and standard deviation of the test data
self.test_scores_mean_ = np.mean(self.test_scores_, axis=1)
self.test_scores_std_ = np.std(self.test_scores_, axis=1)
# draw the curves on the current axes
self.draw()
return self | python | def fit(self, X, y=None):
"""
Fits the validation curve with the wrapped estimator and parameter
array to the specified data. Draws training and test score curves and
saves the scores to the visualizer.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
Returns the instance of the validation curve visualizer for use in
pipelines and other sequential transformers.
"""
# arguments to pass to sk_validation_curve
skvc_kwargs = {
key: self.get_params()[key]
for key in (
'param_name', 'param_range', 'groups', 'cv', 'scoring',
'n_jobs', 'pre_dispatch',
)
}
# compute the validation curve and store scores
curve = sk_validation_curve(self.estimator, X, y, **skvc_kwargs)
self.train_scores_, self.test_scores_ = curve
# compute the mean and standard deviation of the training data
self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
self.train_scores_std_ = np.std(self.train_scores_, axis=1)
# compute the mean and standard deviation of the test data
self.test_scores_mean_ = np.mean(self.test_scores_, axis=1)
self.test_scores_std_ = np.std(self.test_scores_, axis=1)
# draw the curves on the current axes
self.draw()
return self | ['def', 'fit', '(', 'self', ',', 'X', ',', 'y', '=', 'None', ')', ':', '# arguments to pass to sk_validation_curve', 'skvc_kwargs', '=', '{', 'key', ':', 'self', '.', 'get_params', '(', ')', '[', 'key', ']', 'for', 'key', 'in', '(', "'param_name'", ',', "'param_range'", ',', "'groups'", ',', "'cv'", ',', "'scoring'", ',', "'n_jobs'", ',', "'pre_dispatch'", ',', ')', '}', '# compute the validation curve and store scores', 'curve', '=', 'sk_validation_curve', '(', 'self', '.', 'estimator', ',', 'X', ',', 'y', ',', '*', '*', 'skvc_kwargs', ')', 'self', '.', 'train_scores_', ',', 'self', '.', 'test_scores_', '=', 'curve', '# compute the mean and standard deviation of the training data', 'self', '.', 'train_scores_mean_', '=', 'np', '.', 'mean', '(', 'self', '.', 'train_scores_', ',', 'axis', '=', '1', ')', 'self', '.', 'train_scores_std_', '=', 'np', '.', 'std', '(', 'self', '.', 'train_scores_', ',', 'axis', '=', '1', ')', '# compute the mean and standard deviation of the test data', 'self', '.', 'test_scores_mean_', '=', 'np', '.', 'mean', '(', 'self', '.', 'test_scores_', ',', 'axis', '=', '1', ')', 'self', '.', 'test_scores_std_', '=', 'np', '.', 'std', '(', 'self', '.', 'test_scores_', ',', 'axis', '=', '1', ')', '# draw the curves on the current axes', 'self', '.', 'draw', '(', ')', 'return', 'self'] | Fits the validation curve with the wrapped estimator and parameter
array to the specified data. Draws training and test score curves and
saves the scores to the visualizer.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
Returns the instance of the validation curve visualizer for use in
pipelines and other sequential transformers. | ['Fits', 'the', 'validation', 'curve', 'with', 'the', 'wrapped', 'estimator', 'and', 'parameter', 'array', 'to', 'the', 'specified', 'data', '.', 'Draws', 'training', 'and', 'test', 'score', 'curves', 'and', 'saves', 'the', 'scores', 'to', 'the', 'visualizer', '.'] | train | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/model_selection/validation_curve.py#L172-L217 |
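A hedged end-to-end sketch of fitting the visualizer above on toy data; the estimator, parameter grid, and dataset are illustrative assumptions, and only attributes that `fit()` itself sets (`train_scores_mean_`, `test_scores_mean_`) are read back.

```python
# Illustrative usage; the estimator, param_range and toy data are assumptions.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor
from yellowbrick.model_selection import ValidationCurve

X, y = make_regression(n_samples=200, n_features=4, random_state=0)

viz = ValidationCurve(
    DecisionTreeRegressor(random_state=0),
    param_name="max_depth",
    param_range=np.arange(1, 8),
    cv=5,
    scoring="r2",
)
viz.fit(X, y)                      # computes the curves and draws them
print(viz.train_scores_mean_)      # set by fit(), as in the code above
print(viz.test_scores_mean_)
```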
6,714 | GNS3/gns3-server | gns3server/compute/base_node.py | BaseNode.create | def create(self):
"""
Creates the node.
"""
log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name,
name=self.name,
id=self.id)) | python | def create(self):
"""
Creates the node.
"""
log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name,
name=self.name,
id=self.id)) | ['def', 'create', '(', 'self', ')', ':', 'log', '.', 'info', '(', '"{module}: {name} [{id}] created"', '.', 'format', '(', 'module', '=', 'self', '.', 'manager', '.', 'module_name', ',', 'name', '=', 'self', '.', 'name', ',', 'id', '=', 'self', '.', 'id', ')', ')'] | Creates the node. | ['Creates', 'the', 'node', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L254-L261 |
6,715 | peakwinter/python-nginx | nginx.py | Container.add | def add(self, *args):
"""
Add object(s) to the Container.
:param *args: Any objects to add to the Container.
:returns: full list of Container's child objects
"""
self.children.extend(args)
bump_child_depth(self, self._depth)
return self.children | python | def add(self, *args):
"""
Add object(s) to the Container.
:param *args: Any objects to add to the Container.
:returns: full list of Container's child objects
"""
self.children.extend(args)
bump_child_depth(self, self._depth)
return self.children | ['def', 'add', '(', 'self', ',', '*', 'args', ')', ':', 'self', '.', 'children', '.', 'extend', '(', 'args', ')', 'bump_child_depth', '(', 'self', ',', 'self', '.', '_depth', ')', 'return', 'self', '.', 'children'] | Add object(s) to the Container.
:param *args: Any objects to add to the Container.
:returns: full list of Container's child objects | ['Add', 'object', '(', 's', ')', 'to', 'the', 'Container', '.'] | train | https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L133-L142 |
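A short sketch of `add()` being used to build a configuration; `Conf`, `Server`, `Key`, `Location`, and `dumps` all come from the same `nginx` module as this row, while the specific directives are illustrative.

```python
# The directives themselves are illustrative; Conf/Server/Key/Location/dumps
# are python-nginx objects from the module shown above.
import nginx

conf = nginx.Conf()
server = nginx.Server()
server.add(
    nginx.Key('listen', '80'),
    nginx.Key('server_name', 'example.com'),
    nginx.Location('/', nginx.Key('proxy_pass', 'http://127.0.0.1:8080')),
)
conf.add(server)
print(nginx.dumps(conf))   # renders the nginx.conf text
```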
6,716 | splunk/splunk-sdk-python | splunklib/searchcommands/search_command.py | SearchCommand.process | def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
""" Process data.
:param argv: Command line arguments.
:type argv: list or tuple
:param ifile: Input data file.
:type ifile: file
:param ofile: Output data file.
:type ofile: file
:return: :const:`None`
:rtype: NoneType
"""
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
self._process_protocol_v2(argv, ifile, ofile) | python | def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
""" Process data.
:param argv: Command line arguments.
:type argv: list or tuple
:param ifile: Input data file.
:type ifile: file
:param ofile: Output data file.
:type ofile: file
:return: :const:`None`
:rtype: NoneType
"""
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
self._process_protocol_v2(argv, ifile, ofile) | ['def', 'process', '(', 'self', ',', 'argv', '=', 'sys', '.', 'argv', ',', 'ifile', '=', 'sys', '.', 'stdin', ',', 'ofile', '=', 'sys', '.', 'stdout', ')', ':', 'if', 'len', '(', 'argv', ')', '>', '1', ':', 'self', '.', '_process_protocol_v1', '(', 'argv', ',', 'ifile', ',', 'ofile', ')', 'else', ':', 'self', '.', '_process_protocol_v2', '(', 'argv', ',', 'ifile', ',', 'ofile', ')'] | Process data.
:param argv: Command line arguments.
:type argv: list or tuple
:param ifile: Input data file.
:type ifile: file
:param ofile: Output data file.
:type ofile: file
:return: :const:`None`
:rtype: NoneType | ['Process', 'data', '.'] | train | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/searchcommands/search_command.py#L415-L434 |
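In practice `process()` is invoked by `dispatch()`; below is a hedged sketch of a minimal streaming command wired up that way. `dispatch`, `StreamingCommand`, and `Configuration` are part of `splunklib.searchcommands`; the command class itself is illustrative.

```python
# Illustrative command; dispatch() ends up calling process(argv, ifile, ofile)
# on an instance of the class, as documented above.
import sys
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

@Configuration()
class EchoCommand(StreamingCommand):
    """Passes records through unchanged."""
    def stream(self, records):
        for record in records:
            yield record

if __name__ == '__main__':
    dispatch(EchoCommand, sys.argv, sys.stdin, sys.stdout, __name__)
```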
6,717 | dddomodossola/remi | editor/editor_widgets.py | ProjectConfigurationDialog.confirm_dialog | def confirm_dialog(self, emitter):
"""event called pressing on OK button.
"""
#here the user input is transferred to the dict, ready to use
self.from_fields_to_dict()
return super(ProjectConfigurationDialog,self).confirm_dialog(self) | python | def confirm_dialog(self, emitter):
"""event called pressing on OK button.
"""
#here the user input is transferred to the dict, ready to use
self.from_fields_to_dict()
return super(ProjectConfigurationDialog,self).confirm_dialog(self) | ['def', 'confirm_dialog', '(', 'self', ',', 'emitter', ')', ':', '#here the user input is transferred to the dict, ready to use', 'self', '.', 'from_fields_to_dict', '(', ')', 'return', 'super', '(', 'ProjectConfigurationDialog', ',', 'self', ')', '.', 'confirm_dialog', '(', 'self', ')'] | event called pressing on OK button. | ['event', 'called', 'pressing', 'on', 'OK', 'button', '.'] | train | https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/editor/editor_widgets.py#L256-L261 |
6,718 | tensorflow/tensor2tensor | tensor2tensor/utils/cloud_mlengine.py | _tar_and_copy | def _tar_and_copy(src_dir, target_dir):
"""Tar and gzip src_dir and copy to GCS target_dir."""
src_dir = src_dir.rstrip("/")
target_dir = target_dir.rstrip("/")
tmp_dir = tempfile.gettempdir().rstrip("/")
src_base = os.path.basename(src_dir)
shell_run(
"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
src_dir=src_dir,
src_base=src_base,
tmp_dir=tmp_dir)
final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
shell_run(
("gsutil cp {tmp_dir}/{src_base}.tar.gz "
"{final_destination}"),
tmp_dir=tmp_dir,
src_base=src_base,
final_destination=final_destination)
return final_destination | python | def _tar_and_copy(src_dir, target_dir):
"""Tar and gzip src_dir and copy to GCS target_dir."""
src_dir = src_dir.rstrip("/")
target_dir = target_dir.rstrip("/")
tmp_dir = tempfile.gettempdir().rstrip("/")
src_base = os.path.basename(src_dir)
shell_run(
"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
src_dir=src_dir,
src_base=src_base,
tmp_dir=tmp_dir)
final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
shell_run(
("gsutil cp {tmp_dir}/{src_base}.tar.gz "
"{final_destination}"),
tmp_dir=tmp_dir,
src_base=src_base,
final_destination=final_destination)
return final_destination | ['def', '_tar_and_copy', '(', 'src_dir', ',', 'target_dir', ')', ':', 'src_dir', '=', 'src_dir', '.', 'rstrip', '(', '"/"', ')', 'target_dir', '=', 'target_dir', '.', 'rstrip', '(', '"/"', ')', 'tmp_dir', '=', 'tempfile', '.', 'gettempdir', '(', ')', '.', 'rstrip', '(', '"/"', ')', 'src_base', '=', 'os', '.', 'path', '.', 'basename', '(', 'src_dir', ')', 'shell_run', '(', '"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} ."', ',', 'src_dir', '=', 'src_dir', ',', 'src_base', '=', 'src_base', ',', 'tmp_dir', '=', 'tmp_dir', ')', 'final_destination', '=', '"%s/%s.tar.gz"', '%', '(', 'target_dir', ',', 'src_base', ')', 'shell_run', '(', '(', '"gsutil cp {tmp_dir}/{src_base}.tar.gz "', '"{final_destination}"', ')', ',', 'tmp_dir', '=', 'tmp_dir', ',', 'src_base', '=', 'src_base', ',', 'final_destination', '=', 'final_destination', ')', 'return', 'final_destination'] | Tar and gzip src_dir and copy to GCS target_dir. | ['Tar', 'and', 'gzip', 'src_dir', 'and', 'copy', 'to', 'GCS', 'target_dir', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L184-L202 |
6,719 | siznax/wptools | wptools/query.py | WPToolsQuery.set_status | def set_status(self, action, target):
"""
Sets query status with format: "{domain} ({action}) {target}"
"""
try:
target = unquote(target)
except (AttributeError, TypeError):
pass
status = "%s (%s) %s" % (self.domain, action, target)
status = status.strip().replace('\n', '')
if len(status) >= self.MAXWIDTH:
tail = '...'
extent = self.MAXWIDTH - (len(tail) + self.RPAD)
self.status = status[:extent] + tail
else:
self.status = status | python | def set_status(self, action, target):
"""
Sets query status with format: "{domain} ({action}) {target}"
"""
try:
target = unquote(target)
except (AttributeError, TypeError):
pass
status = "%s (%s) %s" % (self.domain, action, target)
status = status.strip().replace('\n', '')
if len(status) >= self.MAXWIDTH:
tail = '...'
extent = self.MAXWIDTH - (len(tail) + self.RPAD)
self.status = status[:extent] + tail
else:
self.status = status | ['def', 'set_status', '(', 'self', ',', 'action', ',', 'target', ')', ':', 'try', ':', 'target', '=', 'unquote', '(', 'target', ')', 'except', '(', 'AttributeError', ',', 'TypeError', ')', ':', 'pass', 'status', '=', '"%s (%s) %s"', '%', '(', 'self', '.', 'domain', ',', 'action', ',', 'target', ')', 'status', '=', 'status', '.', 'strip', '(', ')', '.', 'replace', '(', "'\\n'", ',', "''", ')', 'if', 'len', '(', 'status', ')', '>=', 'self', '.', 'MAXWIDTH', ':', 'tail', '=', "'...'", 'extent', '=', 'self', '.', 'MAXWIDTH', '-', '(', 'len', '(', 'tail', ')', '+', 'self', '.', 'RPAD', ')', 'self', '.', 'status', '=', 'status', '[', ':', 'extent', ']', '+', 'tail', 'else', ':', 'self', '.', 'status', '=', 'status'] | Sets query status with format: "{domain} ({action}) {target}" | ['Sets', 'query', 'status', 'with', 'format', ':', '{', 'domain', '}', '(', '{', 'action', '}', ')', '{', 'target', '}'] | train | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/query.py#L310-L327 |
6,720 | mgedmin/check-manifest | check_manifest.py | strip_toplevel_name | def strip_toplevel_name(filelist):
"""Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
"""
if not filelist:
return filelist
prefix = filelist[0]
if '/' in prefix:
prefix = prefix.partition('/')[0] + '/'
names = filelist
else:
prefix = prefix + '/'
names = filelist[1:]
for name in names:
if not name.startswith(prefix):
raise Failure("File doesn't have the common prefix (%s): %s"
% (name, prefix))
return [name[len(prefix):] for name in names] | python | def strip_toplevel_name(filelist):
"""Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
"""
if not filelist:
return filelist
prefix = filelist[0]
if '/' in prefix:
prefix = prefix.partition('/')[0] + '/'
names = filelist
else:
prefix = prefix + '/'
names = filelist[1:]
for name in names:
if not name.startswith(prefix):
raise Failure("File doesn't have the common prefix (%s): %s"
% (name, prefix))
return [name[len(prefix):] for name in names] | ['def', 'strip_toplevel_name', '(', 'filelist', ')', ':', 'if', 'not', 'filelist', ':', 'return', 'filelist', 'prefix', '=', 'filelist', '[', '0', ']', 'if', "'/'", 'in', 'prefix', ':', 'prefix', '=', 'prefix', '.', 'partition', '(', "'/'", ')', '[', '0', ']', '+', "'/'", 'names', '=', 'filelist', 'else', ':', 'prefix', '=', 'prefix', '+', "'/'", 'names', '=', 'filelist', '[', '1', ':', ']', 'for', 'name', 'in', 'names', ':', 'if', 'not', 'name', '.', 'startswith', '(', 'prefix', ')', ':', 'raise', 'Failure', '(', '"File doesn\'t have the common prefix (%s): %s"', '%', '(', 'name', ',', 'prefix', ')', ')', 'return', '[', 'name', '[', 'len', '(', 'prefix', ')', ':', ']', 'for', 'name', 'in', 'names', ']'] | Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d'] | ['Strip', 'toplevel', 'name', 'from', 'a', 'file', 'list', '.'] | train | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L280-L303 |
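A sketch of the failure path, beyond the doctests above; it assumes Failure, the exception class the code references, is importable from check_manifest.
# Entries that do not share the first entry's top-level directory raise Failure.
try:
    strip_toplevel_name(['a/b', 'c/d'])
except Failure as exc:
    print(exc)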
6,721 | mattrobenolt/ec2 | ec2/base.py | objects_base.get | def get(cls, **kwargs):
"""
Generic get() for one item only
>>> ec2.instances.get(name='production-web-01')
<Instance: ...>
"""
things = cls.filter(**kwargs)
if len(things) > 1:
# Raise an exception if more than one object is matched
raise cls.MultipleObjectsReturned
elif len(things) == 0:
# Raise an exception if no objects were matched
raise cls.DoesNotExist
return things[0] | python | def get(cls, **kwargs):
"""
Generic get() for one item only
>>> ec2.instances.get(name='production-web-01')
<Instance: ...>
"""
things = cls.filter(**kwargs)
if len(things) > 1:
# Raise an exception if more than one object is matched
raise cls.MultipleObjectsReturned
elif len(things) == 0:
# Raise an exception if no objects were matched
raise cls.DoesNotExist
return things[0] | ['def', 'get', '(', 'cls', ',', '*', '*', 'kwargs', ')', ':', 'things', '=', 'cls', '.', 'filter', '(', '*', '*', 'kwargs', ')', 'if', 'len', '(', 'things', ')', '>', '1', ':', '# Raise an exception if more than one object is matched', 'raise', 'cls', '.', 'MultipleObjectsReturned', 'elif', 'len', '(', 'things', ')', '==', '0', ':', '# Raise an exception if no objects were matched', 'raise', 'cls', '.', 'DoesNotExist', 'return', 'things', '[', '0', ']'] | Generic get() for one item only
>>> ec2.instances.get(name='production-web-01')
<Instance: ...> | ['Generic', 'get', '()', 'for', 'one', 'item', 'only'] | train | https://github.com/mattrobenolt/ec2/blob/fc1f8bce6cf76899165d9ac006371181d52439f8/ec2/base.py#L40-L54 |
6,722 | aerkalov/ebooklib | ebooklib/epub.py | EpubBook.get_item_with_href | def get_item_with_href(self, href):
"""
Returns item for defined HREF.
>>> book.get_item_with_href('EPUB/document.xhtml')
:Args:
- href: HREF for the item we are searching for
:Returns:
Returns item object. Returns None if nothing was found.
"""
for item in self.get_items():
if item.get_name() == href:
return item
return None | python | def get_item_with_href(self, href):
"""
Returns item for defined HREF.
>>> book.get_item_with_href('EPUB/document.xhtml')
:Args:
- href: HREF for the item we are searching for
:Returns:
Returns item object. Returns None if nothing was found.
"""
for item in self.get_items():
if item.get_name() == href:
return item
return None | ['def', 'get_item_with_href', '(', 'self', ',', 'href', ')', ':', 'for', 'item', 'in', 'self', '.', 'get_items', '(', ')', ':', 'if', 'item', '.', 'get_name', '(', ')', '==', 'href', ':', 'return', 'item', 'return', 'None'] | Returns item for defined HREF.
>>> book.get_item_with_href('EPUB/document.xhtml')
:Args:
- href: HREF for the item we are searching for
:Returns:
Returns item object. Returns None if nothing was found. | ['Returns', 'item', 'for', 'defined', 'HREF', '.'] | train | https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L763-L779 |
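An illustrative sketch for get_item_with_href; `book` stands for an EpubBook whose items are already loaded (e.g. via ebooklib.epub.read_epub), and the href below is a placeholder.
item = book.get_item_with_href('EPUB/chapter1.xhtml')
if item is None:
    print('no item with that href')   # the method returns None on a miss
else:
    print(item.get_name())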
6,723 | yamcs/yamcs-python | yamcs-client/yamcs/tmtc/model.py | Alarm.acknowledge_message | def acknowledge_message(self):
"""Comment provided when acknowledging the alarm."""
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgeMessage')):
return self._proto.acknowledgeInfo.acknowledgeMessage
return None | python | def acknowledge_message(self):
"""Comment provided when acknowledging the alarm."""
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgeMessage')):
return self._proto.acknowledgeInfo.acknowledgeMessage
return None | ['def', 'acknowledge_message', '(', 'self', ')', ':', 'if', '(', 'self', '.', 'is_acknowledged', 'and', 'self', '.', '_proto', '.', 'acknowledgeInfo', '.', 'HasField', '(', "'acknowledgeMessage'", ')', ')', ':', 'return', 'self', '.', '_proto', '.', 'acknowledgeInfo', '.', 'acknowledgeMessage', 'return', 'None'] | Comment provided when acknowledging the alarm. | ['Comment', 'provided', 'when', 'acknowledging', 'the', 'alarm', '.'] | train | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L323-L328 |
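An illustrative sketch for the acknowledge_message property; `alarm` stands for an Alarm object obtained from the yamcs-client API, which this record does not show being fetched.
# The property returns None unless the alarm was acknowledged with a comment.
if alarm.is_acknowledged:
    print(alarm.acknowledge_message or 'acknowledged without a comment')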
6,724 | iotile/coretools | iotilecore/iotile/core/dev/semver.py | SemanticVersionRange._check_insersection | def _check_insersection(self, version, ranges):
"""Check that a version is inside all of a list of ranges"""
for ver_range in ranges:
if not self._check_ver_range(version, ver_range):
return False
return True | python | def _check_insersection(self, version, ranges):
"""Check that a version is inside all of a list of ranges"""
for ver_range in ranges:
if not self._check_ver_range(version, ver_range):
return False
return True | ['def', '_check_insersection', '(', 'self', ',', 'version', ',', 'ranges', ')', ':', 'for', 'ver_range', 'in', 'ranges', ':', 'if', 'not', 'self', '.', '_check_ver_range', '(', 'version', ',', 'ver_range', ')', ':', 'return', 'False', 'return', 'True'] | Check that a version is inside all of a list of ranges | ['Check', 'that', 'a', 'version', 'is', 'inside', 'all', 'of', 'a', 'list', 'of', 'ranges'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/dev/semver.py#L333-L340 |
6,725 | bjmorgan/lattice_mc | lattice_mc/lattice.py | Lattice.enforce_periodic_boundary_conditions | def enforce_periodic_boundary_conditions( self ):
"""
Ensure that all lattice sites are within the central periodic image of the simulation cell.
Sites that are outside the central simulation cell are mapped back into this cell.
Args:
None
Returns:
None
"""
for s in self.sites:
for i in range(3):
if s.r[i] < 0.0:
s.r[i] += self.cell_lengths[i]
if s.r[i] > self.cell_lengths[i]:
s.r[i] -= self.cell_lengths[i] | python | def enforce_periodic_boundary_conditions( self ):
"""
Ensure that all lattice sites are within the central periodic image of the simulation cell.
Sites that are outside the central simulation cell are mapped back into this cell.
Args:
None
Returns:
None
"""
for s in self.sites:
for i in range(3):
if s.r[i] < 0.0:
s.r[i] += self.cell_lengths[i]
if s.r[i] > self.cell_lengths[i]:
s.r[i] -= self.cell_lengths[i] | ['def', 'enforce_periodic_boundary_conditions', '(', 'self', ')', ':', 'for', 's', 'in', 'self', '.', 'sites', ':', 'for', 'i', 'in', 'range', '(', '3', ')', ':', 'if', 's', '.', 'r', '[', 'i', ']', '<', '0.0', ':', 's', '.', 'r', '[', 'i', ']', '+=', 'self', '.', 'cell_lengths', '[', 'i', ']', 'if', 's', '.', 'r', '[', 'i', ']', '>', 'self', '.', 'cell_lengths', '[', 'i', ']', ':', 's', '.', 'r', '[', 'i', ']', '-=', 'self', '.', 'cell_lengths', '[', 'i', ']'] | Ensure that all lattice sites are within the central periodic image of the simulation cell.
Sites that are outside the central simulation cell are mapped back into this cell.
Args:
None
Returns:
None | ['Ensure', 'that', 'all', 'lattice', 'sites', 'are', 'within', 'the', 'central', 'periodic', 'image', 'of', 'the', 'simulation', 'cell', '.', 'Sites', 'that', 'are', 'outside', 'the', 'central', 'simulation', 'cell', 'are', 'mapped', 'back', 'into', 'this', 'cell', '.', 'Args', ':', 'None'] | train | https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/lattice.py#L41-L57 |
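An illustrative sketch; `lattice` stands for a Lattice whose sites and cell_lengths are already set up, with sites assumed to lie at most one periodic image outside the cell.
lattice.enforce_periodic_boundary_conditions()
# After the call each coordinate has been wrapped by at most one cell length,
# so such sites now sit inside the central simulation cell.
for site in lattice.sites:
    print(site.r)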
6,726 | bububa/pyTOP | pyTOP/simba.py | CampaignArea.get | def get(self, campaign_id, nick=None):
'''xxxxx.xxxxx.campaign.area.get
===================================
Get the targeting region settings of a promotion campaign'''
request = TOPRequest('xxxxx.xxxxx.campaign.area.get')
request['campaign_id'] = campaign_id
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignArea})
return self.result | python | def get(self, campaign_id, nick=None):
'''xxxxx.xxxxx.campaign.area.get
===================================
Get the targeting region settings of a promotion campaign'''
request = TOPRequest('xxxxx.xxxxx.campaign.area.get')
request['campaign_id'] = campaign_id
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignArea})
return self.result | ['def', 'get', '(', 'self', ',', 'campaign_id', ',', 'nick', '=', 'None', ')', ':', 'request', '=', 'TOPRequest', '(', "'xxxxx.xxxxx.campaign.area.get'", ')', 'request', '[', "'campaign_id'", ']', '=', 'campaign_id', 'if', 'nick', '!=', 'None', ':', 'request', '[', "'nick'", ']', '=', 'nick', 'self', '.', 'create', '(', 'self', '.', 'execute', '(', 'request', ')', ',', 'fields', '=', '[', "'success'", ',', "'result'", ',', "'success'", ',', "'result_code'", ',', "'result_message'", ']', ',', 'models', '=', '{', "'result'", ':', 'CampaignArea', '}', ')', 'return', 'self', '.', 'result'] | xxxxx.xxxxx.campaign.area.get
===================================
Get the targeting region settings of a promotion campaign | ['xxxxx', '.', 'xxxxx', '.', 'campaign', '.', 'area', '.', 'get', '===================================', 'Get the targeting region settings of a promotion campaign'] | train | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L86-L94 |
6,727 | dh1tw/pyhamtools | pyhamtools/dxcluster.py | decode_pc11_message | def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["raw_spot"] = raw_string
return data | python | def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["raw_spot"] = raw_string
return data | ['def', 'decode_pc11_message', '(', 'raw_string', ')', ':', 'data', '=', '{', '}', 'spot', '=', 'raw_string', '.', 'split', '(', '"^"', ')', 'data', '[', 'const', '.', 'FREQUENCY', ']', '=', 'float', '(', 'spot', '[', '1', ']', ')', 'data', '[', 'const', '.', 'DX', ']', '=', 'spot', '[', '2', ']', 'data', '[', 'const', '.', 'TIME', ']', '=', 'datetime', '.', 'fromtimestamp', '(', 'mktime', '(', 'strptime', '(', 'spot', '[', '3', ']', '+', '" "', '+', 'spot', '[', '4', ']', '[', ':', '-', '1', ']', ',', '"%d-%b-%Y %H%M"', ')', ')', ')', 'data', '[', 'const', '.', 'COMMENT', ']', '=', 'spot', '[', '5', ']', 'data', '[', 'const', '.', 'SPOTTER', ']', '=', 'spot', '[', '6', ']', 'data', '[', '"node"', ']', '=', 'spot', '[', '7', ']', 'data', '[', '"raw_spot"', ']', '=', 'raw_string', 'return', 'data'] | Decode PC11 message, which usually contains DX Spots | ['Decode', 'PC11', 'message', 'which', 'usually', 'contains', 'DX', 'Spots'] | train | https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/dxcluster.py#L38-L50 |
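An illustrative sketch for decode_pc11_message; the PC11 line below is hypothetical but shaped to match the caret-separated fields the parser reads (frequency, DX call, date, time plus a trailing character, comment, spotter, node).
raw = "PC11^14074.0^DL1ABC^25-Dec-2018^1200Z^FT8 tnx^K1XYZ^K1XYZ-2^H99^"
spot = decode_pc11_message(raw)
print(spot["node"])        # "K1XYZ-2"
print(spot["raw_spot"])    # the original line is kept alongside the parsed fields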
6,728 | awslabs/sockeye | sockeye/coverage.py | get_coverage | def get_coverage(config: CoverageConfig) -> 'Coverage':
"""
Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage.
"""
if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY:
utils.check_condition(config.num_hidden == 1, "Count or fertility coverage requires coverage_num_hidden==1")
if config.type == C.GRU_TYPE:
return GRUCoverage(config.num_hidden, config.layer_normalization)
elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}:
return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization)
elif config.type == C.COVERAGE_COUNT:
return CountCoverage()
elif config.type == C.COVERAGE_FERTILITY:
return FertilityCoverage(config.max_fertility)
else:
raise ValueError("Unknown coverage type %s" % config.type) | python | def get_coverage(config: CoverageConfig) -> 'Coverage':
"""
Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage.
"""
if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY:
utils.check_condition(config.num_hidden == 1, "Count or fertility coverage requires coverage_num_hidden==1")
if config.type == C.GRU_TYPE:
return GRUCoverage(config.num_hidden, config.layer_normalization)
elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}:
return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization)
elif config.type == C.COVERAGE_COUNT:
return CountCoverage()
elif config.type == C.COVERAGE_FERTILITY:
return FertilityCoverage(config.max_fertility)
else:
raise ValueError("Unknown coverage type %s" % config.type) | ['def', 'get_coverage', '(', 'config', ':', 'CoverageConfig', ')', '->', "'Coverage'", ':', 'if', 'config', '.', 'type', '==', 'C', '.', 'COVERAGE_COUNT', 'or', 'config', '.', 'type', '==', 'C', '.', 'COVERAGE_FERTILITY', ':', 'utils', '.', 'check_condition', '(', 'config', '.', 'num_hidden', '==', '1', ',', '"Count or fertility coverage requires coverage_num_hidden==1"', ')', 'if', 'config', '.', 'type', '==', 'C', '.', 'GRU_TYPE', ':', 'return', 'GRUCoverage', '(', 'config', '.', 'num_hidden', ',', 'config', '.', 'layer_normalization', ')', 'elif', 'config', '.', 'type', 'in', '{', 'C', '.', 'TANH', ',', 'C', '.', 'SIGMOID', ',', 'C', '.', 'RELU', ',', 'C', '.', 'SOFT_RELU', '}', ':', 'return', 'ActivationCoverage', '(', 'config', '.', 'num_hidden', ',', 'config', '.', 'type', ',', 'config', '.', 'layer_normalization', ')', 'elif', 'config', '.', 'type', '==', 'C', '.', 'COVERAGE_COUNT', ':', 'return', 'CountCoverage', '(', ')', 'elif', 'config', '.', 'type', '==', 'C', '.', 'COVERAGE_FERTILITY', ':', 'return', 'FertilityCoverage', '(', 'config', '.', 'max_fertility', ')', 'else', ':', 'raise', 'ValueError', '(', '"Unknown coverage type %s"', '%', 'config', '.', 'type', ')'] | Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage. | ['Returns', 'a', 'Coverage', 'instance', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/coverage.py#L52-L70 |
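An illustrative sketch for get_coverage; `config` stands for a CoverageConfig whose attributes (type, num_hidden, layer_normalization, max_fertility) are already populated, since its construction is not shown above.
# With config.type == C.GRU_TYPE the factory returns a GRUCoverage; with one of
# the activation names it returns an ActivationCoverage, and so on per the code above.
coverage = get_coverage(config)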
6,729 | smarie/python-parsyfiles | parsyfiles/parsing_registries.py | ConversionFinder.get_all_conversion_chains | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass | python | def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact
"""
pass | ['def', 'get_all_conversion_chains', '(', 'self', ',', 'from_type', ':', 'Type', '[', 'Any', ']', '=', 'JOKER', ',', 'to_type', ':', 'Type', '[', 'Any', ']', '=', 'JOKER', ')', '->', 'Tuple', '[', 'List', '[', 'Converter', ']', ',', 'List', '[', 'Converter', ']', ',', 'List', '[', 'Converter', ']', ']', ':', 'pass'] | Utility method to find all converters or conversion chains matching the provided query.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact | ['Utility', 'method', 'to', 'find', 'all', 'converters', 'or', 'conversion', 'chains', 'matching', 'the', 'provided', 'query', '.'] | train | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L943-L958 |
6,730 | jobovy/galpy | galpy/potential/MN3ExponentialDiskPotential.py | MN3ExponentialDiskPotential._z2deriv | def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[1].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[2].z2deriv(R,z,phi=phi,t=t) | python | def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-07 - Written - Bovy (IAS)
"""
return self._mn3[0].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[1].z2deriv(R,z,phi=phi,t=t)\
+self._mn3[2].z2deriv(R,z,phi=phi,t=t) | ['def', '_z2deriv', '(', 'self', ',', 'R', ',', 'z', ',', 'phi', '=', '0.', ',', 't', '=', '0.', ')', ':', 'return', 'self', '.', '_mn3', '[', '0', ']', '.', 'z2deriv', '(', 'R', ',', 'z', ',', 'phi', '=', 'phi', ',', 't', '=', 't', ')', '+', 'self', '.', '_mn3', '[', '1', ']', '.', 'z2deriv', '(', 'R', ',', 'z', ',', 'phi', '=', 'phi', ',', 't', '=', 't', ')', '+', 'self', '.', '_mn3', '[', '2', ']', '.', 'z2deriv', '(', 'R', ',', 'z', ',', 'phi', '=', 'phi', ',', 't', '=', 't', ')'] | NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2015-02-07 - Written - Bovy (IAS) | ['NAME', ':', '_z2deriv', 'PURPOSE', ':', 'evaluate', 'the', 'second', 'vertical', 'derivative', 'for', 'this', 'potential', 'INPUT', ':', 'R', '-', 'Galactocentric', 'cylindrical', 'radius', 'z', '-', 'vertical', 'height', 'phi', '-', 'azimuth', 't', '-', 'time', 'OUTPUT', ':', 'the', 'second', 'vertical', 'derivative', 'HISTORY', ':', '2015', '-', '02', '-', '07', '-', 'Written', '-', 'Bovy', '(', 'IAS', ')'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/MN3ExponentialDiskPotential.py#L221-L239 |
6,731 | cloud-custodian/cloud-custodian | tools/c7n_logexporter/c7n_logexporter/exporter.py | size | def size(config, accounts=(), day=None, group=None, human=True, region=None):
"""size of exported records for a given day."""
config = validate.callback(config)
destination = config.get('destination')
client = boto3.Session().client('s3')
day = parse(day)
def export_size(client, account):
paginator = client.get_paginator('list_objects_v2')
count = 0
size = 0
session = get_session(account['role'], region)
account_id = session.client('sts').get_caller_identity()['Account']
prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
account['account_id'] = account_id
for page in paginator.paginate(
Bucket=destination['bucket'],
Prefix=prefix):
for k in page.get('Contents', ()):
size += k['Size']
count += 1
return (count, size)
total_size = 0
accounts_report = []
logging.getLogger('botocore').setLevel(logging.ERROR)
with ThreadPoolExecutor(max_workers=16) as w:
futures = {}
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
futures[w.submit(export_size, client, account)] = account
for f in as_completed(futures):
account = futures[f]
count, size = f.result()
account.pop('role')
account.pop('groups')
total_size += size
if human:
account['size'] = GetHumanSize(size)
else:
account['size'] = size
account['count'] = count
accounts_report.append(account)
accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
print(tabulate(accounts_report, headers='keys'))
log.info("total size:%s", GetHumanSize(total_size)) | python | def size(config, accounts=(), day=None, group=None, human=True, region=None):
"""size of exported records for a given day."""
config = validate.callback(config)
destination = config.get('destination')
client = boto3.Session().client('s3')
day = parse(day)
def export_size(client, account):
paginator = client.get_paginator('list_objects_v2')
count = 0
size = 0
session = get_session(account['role'], region)
account_id = session.client('sts').get_caller_identity()['Account']
prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
account['account_id'] = account_id
for page in paginator.paginate(
Bucket=destination['bucket'],
Prefix=prefix):
for k in page.get('Contents', ()):
size += k['Size']
count += 1
return (count, size)
total_size = 0
accounts_report = []
logging.getLogger('botocore').setLevel(logging.ERROR)
with ThreadPoolExecutor(max_workers=16) as w:
futures = {}
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
futures[w.submit(export_size, client, account)] = account
for f in as_completed(futures):
account = futures[f]
count, size = f.result()
account.pop('role')
account.pop('groups')
total_size += size
if human:
account['size'] = GetHumanSize(size)
else:
account['size'] = size
account['count'] = count
accounts_report.append(account)
accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
print(tabulate(accounts_report, headers='keys'))
log.info("total size:%s", GetHumanSize(total_size)) | ['def', 'size', '(', 'config', ',', 'accounts', '=', '(', ')', ',', 'day', '=', 'None', ',', 'group', '=', 'None', ',', 'human', '=', 'True', ',', 'region', '=', 'None', ')', ':', 'config', '=', 'validate', '.', 'callback', '(', 'config', ')', 'destination', '=', 'config', '.', 'get', '(', "'destination'", ')', 'client', '=', 'boto3', '.', 'Session', '(', ')', '.', 'client', '(', "'s3'", ')', 'day', '=', 'parse', '(', 'day', ')', 'def', 'export_size', '(', 'client', ',', 'account', ')', ':', 'paginator', '=', 'client', '.', 'get_paginator', '(', "'list_objects_v2'", ')', 'count', '=', '0', 'size', '=', '0', 'session', '=', 'get_session', '(', 'account', '[', "'role'", ']', ',', 'region', ')', 'account_id', '=', 'session', '.', 'client', '(', "'sts'", ')', '.', 'get_caller_identity', '(', ')', '[', "'Account'", ']', 'prefix', '=', 'destination', '.', 'get', '(', "'prefix'", ',', "''", ')', '.', 'rstrip', '(', "'/'", ')', '+', "'/%s'", '%', 'account_id', 'prefix', '=', '"%s/%s/%s"', '%', '(', 'prefix', ',', 'group', ',', 'day', '.', 'strftime', '(', '"%Y/%m/%d"', ')', ')', 'account', '[', "'account_id'", ']', '=', 'account_id', 'for', 'page', 'in', 'paginator', '.', 'paginate', '(', 'Bucket', '=', 'destination', '[', "'bucket'", ']', ',', 'Prefix', '=', 'prefix', ')', ':', 'for', 'k', 'in', 'page', '.', 'get', '(', "'Contents'", ',', '(', ')', ')', ':', 'size', '+=', 'k', '[', "'Size'", ']', 'count', '+=', '1', 'return', '(', 'count', ',', 'size', ')', 'total_size', '=', '0', 'accounts_report', '=', '[', ']', 'logging', '.', 'getLogger', '(', "'botocore'", ')', '.', 'setLevel', '(', 'logging', '.', 'ERROR', ')', 'with', 'ThreadPoolExecutor', '(', 'max_workers', '=', '16', ')', 'as', 'w', ':', 'futures', '=', '{', '}', 'for', 'account', 'in', 'config', '.', 'get', '(', "'accounts'", ')', ':', 'if', 'accounts', 'and', 'account', '[', "'name'", ']', 'not', 'in', 'accounts', ':', 'continue', 'futures', '[', 'w', '.', 'submit', '(', 'export_size', ',', 'client', ',', 'account', ')', ']', '=', 'account', 'for', 'f', 'in', 'as_completed', '(', 'futures', ')', ':', 'account', '=', 'futures', '[', 'f', ']', 'count', ',', 'size', '=', 'f', '.', 'result', '(', ')', 'account', '.', 'pop', '(', "'role'", ')', 'account', '.', 'pop', '(', "'groups'", ')', 'total_size', '+=', 'size', 'if', 'human', ':', 'account', '[', "'size'", ']', '=', 'GetHumanSize', '(', 'size', ')', 'else', ':', 'account', '[', "'size'", ']', '=', 'size', 'account', '[', "'count'", ']', '=', 'count', 'accounts_report', '.', 'append', '(', 'account', ')', 'accounts_report', '.', 'sort', '(', 'key', '=', 'operator', '.', 'itemgetter', '(', "'count'", ')', ',', 'reverse', '=', 'True', ')', 'print', '(', 'tabulate', '(', 'accounts_report', ',', 'headers', '=', "'keys'", ')', ')', 'log', '.', 'info', '(', '"total size:%s"', ',', 'GetHumanSize', '(', 'total_size', ')', ')'] | size of exported records for a given day. | ['size', 'of', 'exported', 'records', 'for', 'a', 'given', 'day', '.'] | train | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_logexporter/c7n_logexporter/exporter.py#L514-L563 |
6,732 | wbond/asn1crypto | asn1crypto/core.py | Sequence.native | def native(self):
"""
The native Python datatype representation of this value
:return:
An OrderedDict or None. If an OrderedDict, all child values are
recursively converted to native representation also.
"""
if self.contents is None:
return None
if self._native is None:
if self.children is None:
self._parse_children(recurse=True)
try:
self._native = OrderedDict()
for index, child in enumerate(self.children):
if child.__class__ == tuple:
child = _build(*child)
self.children[index] = child
try:
name = self._fields[index][0]
except (IndexError):
name = str_cls(index)
self._native[name] = child.native
except (ValueError, TypeError) as e:
self._native = None
args = e.args[1:]
e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
raise e
return self._native | python | def native(self):
"""
The native Python datatype representation of this value
:return:
An OrderedDict or None. If an OrderedDict, all child values are
recursively converted to native representation also.
"""
if self.contents is None:
return None
if self._native is None:
if self.children is None:
self._parse_children(recurse=True)
try:
self._native = OrderedDict()
for index, child in enumerate(self.children):
if child.__class__ == tuple:
child = _build(*child)
self.children[index] = child
try:
name = self._fields[index][0]
except (IndexError):
name = str_cls(index)
self._native[name] = child.native
except (ValueError, TypeError) as e:
self._native = None
args = e.args[1:]
e.args = (e.args[0] + '\n while parsing %s' % type_name(self),) + args
raise e
return self._native | ['def', 'native', '(', 'self', ')', ':', 'if', 'self', '.', 'contents', 'is', 'None', ':', 'return', 'None', 'if', 'self', '.', '_native', 'is', 'None', ':', 'if', 'self', '.', 'children', 'is', 'None', ':', 'self', '.', '_parse_children', '(', 'recurse', '=', 'True', ')', 'try', ':', 'self', '.', '_native', '=', 'OrderedDict', '(', ')', 'for', 'index', ',', 'child', 'in', 'enumerate', '(', 'self', '.', 'children', ')', ':', 'if', 'child', '.', '__class__', '==', 'tuple', ':', 'child', '=', '_build', '(', '*', 'child', ')', 'self', '.', 'children', '[', 'index', ']', '=', 'child', 'try', ':', 'name', '=', 'self', '.', '_fields', '[', 'index', ']', '[', '0', ']', 'except', '(', 'IndexError', ')', ':', 'name', '=', 'str_cls', '(', 'index', ')', 'self', '.', '_native', '[', 'name', ']', '=', 'child', '.', 'native', 'except', '(', 'ValueError', ',', 'TypeError', ')', 'as', 'e', ':', 'self', '.', '_native', '=', 'None', 'args', '=', 'e', '.', 'args', '[', '1', ':', ']', 'e', '.', 'args', '=', '(', 'e', '.', 'args', '[', '0', ']', '+', "'\\n while parsing %s'", '%', 'type_name', '(', 'self', ')', ',', ')', '+', 'args', 'raise', 'e', 'return', 'self', '.', '_native'] | The native Python datatype representation of this value
:return:
An OrderedDict or None. If an OrderedDict, all child values are
recursively converted to native representation also. | ['The', 'native', 'Python', 'datatype', 'representation', 'of', 'this', 'value'] | train | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L3795-L3826 |
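An illustrative sketch for the native property, using asn1crypto.x509.Certificate as one concrete Sequence subclass; der_bytes is a placeholder for a DER-encoded certificate.
from asn1crypto import x509

cert = x509.Certificate.load(der_bytes)            # der_bytes is assumed to exist
native = cert.native                               # OrderedDict of field name -> native value
print(native['tbs_certificate']['serial_number'])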
6,733 | jonathf/chaospy | chaospy/poly/typing.py | toarray | def toarray(vari):
"""
Convert polynomial array into a numpy.asarray of polynomials.
Args:
vari (Poly, numpy.ndarray):
Input data.
Returns:
(numpy.ndarray):
A numpy array with ``Q.shape==A.shape``.
Examples:
>>> poly = cp.prange(3)
>>> print(poly)
[1, q0, q0^2]
>>> array = cp.toarray(poly)
>>> print(isinstance(array, numpy.ndarray))
True
>>> print(array[1])
q0
"""
if isinstance(vari, Poly):
shape = vari.shape
out = numpy.asarray(
[{} for _ in range(numpy.prod(shape))],
dtype=object
)
core = vari.A.copy()
for key in core.keys():
core[key] = core[key].flatten()
for i in range(numpy.prod(shape)):
if not numpy.all(core[key][i] == 0):
out[i][key] = core[key][i]
for i in range(numpy.prod(shape)):
out[i] = Poly(out[i], vari.dim, (), vari.dtype)
out = out.reshape(shape)
return out
return numpy.asarray(vari) | python | def toarray(vari):
"""
Convert polynomial array into a numpy.asarray of polynomials.
Args:
vari (Poly, numpy.ndarray):
Input data.
Returns:
(numpy.ndarray):
A numpy array with ``Q.shape==A.shape``.
Examples:
>>> poly = cp.prange(3)
>>> print(poly)
[1, q0, q0^2]
>>> array = cp.toarray(poly)
>>> print(isinstance(array, numpy.ndarray))
True
>>> print(array[1])
q0
"""
if isinstance(vari, Poly):
shape = vari.shape
out = numpy.asarray(
[{} for _ in range(numpy.prod(shape))],
dtype=object
)
core = vari.A.copy()
for key in core.keys():
core[key] = core[key].flatten()
for i in range(numpy.prod(shape)):
if not numpy.all(core[key][i] == 0):
out[i][key] = core[key][i]
for i in range(numpy.prod(shape)):
out[i] = Poly(out[i], vari.dim, (), vari.dtype)
out = out.reshape(shape)
return out
return numpy.asarray(vari) | ['def', 'toarray', '(', 'vari', ')', ':', 'if', 'isinstance', '(', 'vari', ',', 'Poly', ')', ':', 'shape', '=', 'vari', '.', 'shape', 'out', '=', 'numpy', '.', 'asarray', '(', '[', '{', '}', 'for', '_', 'in', 'range', '(', 'numpy', '.', 'prod', '(', 'shape', ')', ')', ']', ',', 'dtype', '=', 'object', ')', 'core', '=', 'vari', '.', 'A', '.', 'copy', '(', ')', 'for', 'key', 'in', 'core', '.', 'keys', '(', ')', ':', 'core', '[', 'key', ']', '=', 'core', '[', 'key', ']', '.', 'flatten', '(', ')', 'for', 'i', 'in', 'range', '(', 'numpy', '.', 'prod', '(', 'shape', ')', ')', ':', 'if', 'not', 'numpy', '.', 'all', '(', 'core', '[', 'key', ']', '[', 'i', ']', '==', '0', ')', ':', 'out', '[', 'i', ']', '[', 'key', ']', '=', 'core', '[', 'key', ']', '[', 'i', ']', 'for', 'i', 'in', 'range', '(', 'numpy', '.', 'prod', '(', 'shape', ')', ')', ':', 'out', '[', 'i', ']', '=', 'Poly', '(', 'out', '[', 'i', ']', ',', 'vari', '.', 'dim', ',', '(', ')', ',', 'vari', '.', 'dtype', ')', 'out', '=', 'out', '.', 'reshape', '(', 'shape', ')', 'return', 'out', 'return', 'numpy', '.', 'asarray', '(', 'vari', ')'] | Convert polynomial array into a numpy.asarray of polynomials.
Args:
vari (Poly, numpy.ndarray):
Input data.
Returns:
(numpy.ndarray):
A numpy array with ``Q.shape==A.shape``.
Examples:
>>> poly = cp.prange(3)
>>> print(poly)
[1, q0, q0^2]
>>> array = cp.toarray(poly)
>>> print(isinstance(array, numpy.ndarray))
True
>>> print(array[1])
q0 | ['Convert', 'polynomial', 'array', 'into', 'a', 'numpy', '.', 'asarray', 'of', 'polynomials', '.'] | train | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/typing.py#L115-L159 |
6,734 | delfick/nose-of-yeti | noseOfYeti/tokeniser/spec_codec.py | TokeniserCodec.register | def register(self):
"""Register spec codec"""
# Assume utf8 encoding
utf8 = encodings.search_function('utf8')
class StreamReader(utf_8.StreamReader):
"""Used by cPython to deal with a spec file"""
def __init__(sr, stream, *args, **kwargs):
codecs.StreamReader.__init__(sr, stream, *args, **kwargs)
data = self.dealwith(sr.stream.readline)
sr.stream = StringIO(data)
def decode(text, *args, **kwargs):
"""Used by pypy and pylint to deal with a spec file"""
return_tuple = kwargs.get("return_tuple", True)
if six.PY3:
if hasattr(text, 'tobytes'):
text = text.tobytes().decode('utf8')
else:
text = text.decode('utf8')
buffered = StringIO(text)
# Determine if we need to have imports for this string
# It may be a fragment of the file
has_spec = regexes['encoding_matcher'].search(buffered.readline())
no_imports = not has_spec
buffered.seek(0)
# Translate the text
if six.PY2:
utf8 = encodings.search_function('utf8') # Assume utf8 encoding
reader = utf8.streamreader(buffered)
else:
reader = buffered
data = self.dealwith(reader.readline, no_imports=no_imports)
# If nothing was changed, then we want to use the original file/line
# Also have to replace indentation of original line with indentation of new line
# To take into account nested describes
if text and not regexes['only_whitespace'].match(text):
if regexes['whitespace'].sub('', text) == regexes['whitespace'].sub('', data):
bad_indentation = regexes['leading_whitespace'].search(text).groups()[0]
good_indentation = regexes['leading_whitespace'].search(data).groups()[0]
data = '%s%s' % (good_indentation, text[len(bad_indentation):])
# If text is empty and data isn't, then we should return text
if len(text) == 0 and len(data) == 1:
if return_tuple:
return "", 0
else:
return ""
# Return translated version and it's length
if return_tuple:
return data, len(data)
else:
return data
incrementaldecoder = utf8.incrementaldecoder
if six.PY3:
def incremental_decode(decoder, *args, **kwargs):
"""Wrapper for decode from IncrementalDecoder"""
kwargs["return_tuple"] = False
return decode(*args, **kwargs)
incrementaldecoder = type("IncrementalDecoder", (utf8.incrementaldecoder, ), {"decode": incremental_decode})
def search_function(s):
"""Determine if a file is of spec encoding and return special CodecInfo if it is"""
if s != 'spec': return None
return codecs.CodecInfo(
name='spec'
, encode=utf8.encode
, decode=decode
, streamreader=StreamReader
, streamwriter=utf8.streamwriter
, incrementalencoder=utf8.incrementalencoder
, incrementaldecoder=incrementaldecoder
)
# Do the register
codecs.register(search_function) | python | def register(self):
"""Register spec codec"""
# Assume utf8 encoding
utf8 = encodings.search_function('utf8')
class StreamReader(utf_8.StreamReader):
"""Used by cPython to deal with a spec file"""
def __init__(sr, stream, *args, **kwargs):
codecs.StreamReader.__init__(sr, stream, *args, **kwargs)
data = self.dealwith(sr.stream.readline)
sr.stream = StringIO(data)
def decode(text, *args, **kwargs):
"""Used by pypy and pylint to deal with a spec file"""
return_tuple = kwargs.get("return_tuple", True)
if six.PY3:
if hasattr(text, 'tobytes'):
text = text.tobytes().decode('utf8')
else:
text = text.decode('utf8')
buffered = StringIO(text)
# Determine if we need to have imports for this string
# It may be a fragment of the file
has_spec = regexes['encoding_matcher'].search(buffered.readline())
no_imports = not has_spec
buffered.seek(0)
# Translate the text
if six.PY2:
utf8 = encodings.search_function('utf8') # Assume utf8 encoding
reader = utf8.streamreader(buffered)
else:
reader = buffered
data = self.dealwith(reader.readline, no_imports=no_imports)
# If nothing was changed, then we want to use the original file/line
# Also have to replace indentation of original line with indentation of new line
# To take into account nested describes
if text and not regexes['only_whitespace'].match(text):
if regexes['whitespace'].sub('', text) == regexes['whitespace'].sub('', data):
bad_indentation = regexes['leading_whitespace'].search(text).groups()[0]
good_indentation = regexes['leading_whitespace'].search(data).groups()[0]
data = '%s%s' % (good_indentation, text[len(bad_indentation):])
# If text is empty and data isn't, then we should return text
if len(text) == 0 and len(data) == 1:
if return_tuple:
return "", 0
else:
return ""
# Return translated version and it's length
if return_tuple:
return data, len(data)
else:
return data
incrementaldecoder = utf8.incrementaldecoder
if six.PY3:
def incremental_decode(decoder, *args, **kwargs):
"""Wrapper for decode from IncrementalDecoder"""
kwargs["return_tuple"] = False
return decode(*args, **kwargs)
incrementaldecoder = type("IncrementalDecoder", (utf8.incrementaldecoder, ), {"decode": incremental_decode})
def search_function(s):
"""Determine if a file is of spec encoding and return special CodecInfo if it is"""
if s != 'spec': return None
return codecs.CodecInfo(
name='spec'
, encode=utf8.encode
, decode=decode
, streamreader=StreamReader
, streamwriter=utf8.streamwriter
, incrementalencoder=utf8.incrementalencoder
, incrementaldecoder=incrementaldecoder
)
# Do the register
codecs.register(search_function) | ['def', 'register', '(', 'self', ')', ':', '# Assume utf8 encoding', 'utf8', '=', 'encodings', '.', 'search_function', '(', "'utf8'", ')', 'class', 'StreamReader', '(', 'utf_8', '.', 'StreamReader', ')', ':', '"""Used by cPython to deal with a spec file"""', 'def', '__init__', '(', 'sr', ',', 'stream', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'codecs', '.', 'StreamReader', '.', '__init__', '(', 'sr', ',', 'stream', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'data', '=', 'self', '.', 'dealwith', '(', 'sr', '.', 'stream', '.', 'readline', ')', 'sr', '.', 'stream', '=', 'StringIO', '(', 'data', ')', 'def', 'decode', '(', 'text', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""Used by pypy and pylint to deal with a spec file"""', 'return_tuple', '=', 'kwargs', '.', 'get', '(', '"return_tuple"', ',', 'True', ')', 'if', 'six', '.', 'PY3', ':', 'if', 'hasattr', '(', 'text', ',', "'tobytes'", ')', ':', 'text', '=', 'text', '.', 'tobytes', '(', ')', '.', 'decode', '(', "'utf8'", ')', 'else', ':', 'text', '=', 'text', '.', 'decode', '(', "'utf8'", ')', 'buffered', '=', 'StringIO', '(', 'text', ')', '# Determine if we need to have imports for this string', '# It may be a fragment of the file', 'has_spec', '=', 'regexes', '[', "'encoding_matcher'", ']', '.', 'search', '(', 'buffered', '.', 'readline', '(', ')', ')', 'no_imports', '=', 'not', 'has_spec', 'buffered', '.', 'seek', '(', '0', ')', '# Translate the text', 'if', 'six', '.', 'PY2', ':', 'utf8', '=', 'encodings', '.', 'search_function', '(', "'utf8'", ')', '# Assume utf8 encoding', 'reader', '=', 'utf8', '.', 'streamreader', '(', 'buffered', ')', 'else', ':', 'reader', '=', 'buffered', 'data', '=', 'self', '.', 'dealwith', '(', 'reader', '.', 'readline', ',', 'no_imports', '=', 'no_imports', ')', '# If nothing was changed, then we want to use the original file/line', '# Also have to replace indentation of original line with indentation of new line', '# To take into account nested describes', 'if', 'text', 'and', 'not', 'regexes', '[', "'only_whitespace'", ']', '.', 'match', '(', 'text', ')', ':', 'if', 'regexes', '[', "'whitespace'", ']', '.', 'sub', '(', "''", ',', 'text', ')', '==', 'regexes', '[', "'whitespace'", ']', '.', 'sub', '(', "''", ',', 'data', ')', ':', 'bad_indentation', '=', 'regexes', '[', "'leading_whitespace'", ']', '.', 'search', '(', 'text', ')', '.', 'groups', '(', ')', '[', '0', ']', 'good_indentation', '=', 'regexes', '[', "'leading_whitespace'", ']', '.', 'search', '(', 'data', ')', '.', 'groups', '(', ')', '[', '0', ']', 'data', '=', "'%s%s'", '%', '(', 'good_indentation', ',', 'text', '[', 'len', '(', 'bad_indentation', ')', ':', ']', ')', "# If text is empty and data isn't, then we should return text", 'if', 'len', '(', 'text', ')', '==', '0', 'and', 'len', '(', 'data', ')', '==', '1', ':', 'if', 'return_tuple', ':', 'return', '""', ',', '0', 'else', ':', 'return', '""', "# Return translated version and it's length", 'if', 'return_tuple', ':', 'return', 'data', ',', 'len', '(', 'data', ')', 'else', ':', 'return', 'data', 'incrementaldecoder', '=', 'utf8', '.', 'incrementaldecoder', 'if', 'six', '.', 'PY3', ':', 'def', 'incremental_decode', '(', 'decoder', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""Wrapper for decode from IncrementalDecoder"""', 'kwargs', '[', '"return_tuple"', ']', '=', 'False', 'return', 'decode', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'incrementaldecoder', '=', 'type', '(', '"IncrementalDecoder"', ',', '(', 'utf8', 
'.', 'incrementaldecoder', ',', ')', ',', '{', '"decode"', ':', 'incremental_decode', '}', ')', 'def', 'search_function', '(', 's', ')', ':', '"""Determine if a file is of spec encoding and return special CodecInfo if it is"""', 'if', 's', '!=', "'spec'", ':', 'return', 'None', 'return', 'codecs', '.', 'CodecInfo', '(', 'name', '=', "'spec'", ',', 'encode', '=', 'utf8', '.', 'encode', ',', 'decode', '=', 'decode', ',', 'streamreader', '=', 'StreamReader', ',', 'streamwriter', '=', 'utf8', '.', 'streamwriter', ',', 'incrementalencoder', '=', 'utf8', '.', 'incrementalencoder', ',', 'incrementaldecoder', '=', 'incrementaldecoder', ')', '# Do the register', 'codecs', '.', 'register', '(', 'search_function', ')'] | Register spec codec | ['Register', 'spec', 'codec'] | train | https://github.com/delfick/nose-of-yeti/blob/0b545ff350cebd59b40b601333c13033ce40d6dc/noseOfYeti/tokeniser/spec_codec.py#L25-L108 |
6,735 | softlayer/softlayer-python | SoftLayer/managers/dedicated_host.py | DedicatedHostManager._get_item | def _get_item(self, package, flavor):
"""Returns the item for ordering a dedicated host."""
for item in package['items']:
if item['keyName'] == flavor:
return item
raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor) | python | def _get_item(self, package, flavor):
"""Returns the item for ordering a dedicated host."""
for item in package['items']:
if item['keyName'] == flavor:
return item
raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor) | ['def', '_get_item', '(', 'self', ',', 'package', ',', 'flavor', ')', ':', 'for', 'item', 'in', 'package', '[', "'items'", ']', ':', 'if', 'item', '[', "'keyName'", ']', '==', 'flavor', ':', 'return', 'item', 'raise', 'SoftLayer', '.', 'SoftLayerError', '(', '"Could not find valid item for: \'%s\'"', '%', 'flavor', ')'] | Returns the item for ordering a dedicated host. | ['Returns', 'the', 'item', 'for', 'ordering', 'a', 'dedicated', 'host', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/dedicated_host.py#L431-L438 |
6,736 | thanethomson/statik | statik/utils.py | find_first_file_with_ext | def find_first_file_with_ext(base_paths, prefix, exts):
"""Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None).
"""
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, "%s%s" % (prefix, ext))
if os.path.exists(filename) and os.path.isfile(filename):
logger.debug("Found first file with relevant extension: %s", filename)
return base_path, ext
logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
return None, None | python | def find_first_file_with_ext(base_paths, prefix, exts):
"""Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None).
"""
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, "%s%s" % (prefix, ext))
if os.path.exists(filename) and os.path.isfile(filename):
logger.debug("Found first file with relevant extension: %s", filename)
return base_path, ext
logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
return None, None | ['def', 'find_first_file_with_ext', '(', 'base_paths', ',', 'prefix', ',', 'exts', ')', ':', 'for', 'base_path', 'in', 'base_paths', ':', 'for', 'ext', 'in', 'exts', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'base_path', ',', '"%s%s"', '%', '(', 'prefix', ',', 'ext', ')', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'logger', '.', 'debug', '(', '"Found first file with relevant extension: %s"', ',', 'filename', ')', 'return', 'base_path', ',', 'ext', 'logger', '.', 'debug', '(', '"No files found for prefix %s, extensions %s"', ',', 'prefix', ',', '", "', '.', 'join', '(', 'exts', ')', ')', 'return', 'None', ',', 'None'] | Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None). | ['Runs', 'through', 'the', 'given', 'list', 'of', 'file', 'extensions', 'and', 'returns', 'the', 'first', 'file', 'with', 'the', 'given', 'base', 'path', 'and', 'extension', 'combination', 'that', 'actually', 'exists', '.'] | train | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L336-L357 |
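An illustrative call with made-up paths: it searches each base path in order for person.yml, person.yaml or person.json and reports where the first match was found.
import os

base_path, ext = find_first_file_with_ext(
    ["models", os.path.join("themes", "default", "models")],
    "person",
    [".yml", ".yaml", ".json"],
)
if base_path is None:
    print("no matching file")     # the function returns (None, None) on a miss
else:
    print(os.path.join(base_path, "person" + ext))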
6,737 | sosy-lab/benchexec | benchexec/check_cgroups.py | main | def main(argv=None):
"""
A simple command-line interface for the cgroups check of BenchExec.
"""
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description=
"""Check whether cgroups are available and can be used for BenchExec.
Part of BenchExec: https://github.com/sosy-lab/benchexec/""")
parser.add_argument("--wait", type=int, default=1, metavar="SECONDS",
help='wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)')
parser.add_argument("--no-thread", action="store_true",
help='run check on the main thread instead of a separate thread'
+ '(behavior of cgrulesengd differs depending on this)')
options = parser.parse_args(argv[1:])
if options.no_thread:
check_cgroup_availability(options.wait)
else:
check_cgroup_availability_in_thread(options) | python | def main(argv=None):
"""
A simple command-line interface for the cgroups check of BenchExec.
"""
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description=
"""Check whether cgroups are available and can be used for BenchExec.
Part of BenchExec: https://github.com/sosy-lab/benchexec/""")
parser.add_argument("--wait", type=int, default=1, metavar="SECONDS",
help='wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)')
parser.add_argument("--no-thread", action="store_true",
help='run check on the main thread instead of a separate thread'
+ '(behavior of cgrulesengd differs depending on this)')
options = parser.parse_args(argv[1:])
if options.no_thread:
check_cgroup_availability(options.wait)
else:
check_cgroup_availability_in_thread(options) | ['def', 'main', '(', 'argv', '=', 'None', ')', ':', 'if', 'argv', 'is', 'None', ':', 'argv', '=', 'sys', '.', 'argv', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'fromfile_prefix_chars', '=', "'@'", ',', 'description', '=', '"""Check whether cgroups are available and can be used for BenchExec.\n Part of BenchExec: https://github.com/sosy-lab/benchexec/"""', ')', 'parser', '.', 'add_argument', '(', '"--wait"', ',', 'type', '=', 'int', ',', 'default', '=', '1', ',', 'metavar', '=', '"SECONDS"', ',', 'help', '=', "'wait some time to ensure no process interferes with cgroups in the meantime (default: 1s)'", ')', 'parser', '.', 'add_argument', '(', '"--no-thread"', ',', 'action', '=', '"store_true"', ',', 'help', '=', "'run check on the main thread instead of a separate thread'", '+', "'(behavior of cgrulesengd differs depending on this)'", ')', 'options', '=', 'parser', '.', 'parse_args', '(', 'argv', '[', '1', ':', ']', ')', 'if', 'options', '.', 'no_thread', ':', 'check_cgroup_availability', '(', 'options', '.', 'wait', ')', 'else', ':', 'check_cgroup_availability_in_thread', '(', 'options', ')'] | A simple command-line interface for the cgroups check of BenchExec. | ['A', 'simple', 'command', '-', 'line', 'interface', 'for', 'the', 'cgroups', 'check', 'of', 'BenchExec', '.'] | train | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/check_cgroups.py#L112-L135 |
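A short sketch of driving this entry point from Python; main() parses argv[1:], so the first list element below is only a placeholder program name.
main(["check_cgroups", "--wait", "2"])      # wait 2 seconds before checking
main(["check_cgroups", "--no-thread"])      # run the check on the main thread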
6,738 | dnanexus/dx-toolkit | src/python/dxpy/workflow_builder.py | _create_temporary_projects | def _create_temporary_projects(enabled_regions, args):
"""
Creates a temporary project needed to build an underlying workflow
for a global workflow. Returns a dictionary with region names as keys
and project IDs as values
The regions in which projects will be created can be:
i. regions specified in dxworkflow.json "regionalOptions"
ii. regions specified as an argument to "dx build"
iii. current context project, if None of the above are set
If both args and dxworkflow.json specify regions, they must match.
"""
# Create one temp project in each region
projects_by_region = {} # Project IDs by region
for region in enabled_regions:
try:
project_input = {"name": "Temporary build project for dx build global workflow",
"region": region}
if args.bill_to:
project_input["billTo"] = args.bill_to
temp_project = dxpy.api.project_new(project_input)["id"]
projects_by_region[region] = temp_project
logger.debug("Created temporary project {} to build in".format(temp_project))
except:
# Clean up any temp projects that might have been created
if projects_by_region:
dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
err_exit()
return projects_by_region | python | def _create_temporary_projects(enabled_regions, args):
"""
Creates a temporary project needed to build an underlying workflow
for a global workflow. Returns a dictionary with region names as keys
and project IDs as values
The regions in which projects will be created can be:
i. regions specified in dxworkflow.json "regionalOptions"
ii. regions specified as an argument to "dx build"
iii. current context project, if None of the above are set
If both args and dxworkflow.json specify regions, they must match.
"""
# Create one temp project in each region
projects_by_region = {} # Project IDs by region
for region in enabled_regions:
try:
project_input = {"name": "Temporary build project for dx build global workflow",
"region": region}
if args.bill_to:
project_input["billTo"] = args.bill_to
temp_project = dxpy.api.project_new(project_input)["id"]
projects_by_region[region] = temp_project
logger.debug("Created temporary project {} to build in".format(temp_project))
except:
# Clean up any temp projects that might have been created
if projects_by_region:
dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
err_exit()
return projects_by_region | ['def', '_create_temporary_projects', '(', 'enabled_regions', ',', 'args', ')', ':', '# Create one temp project in each region', 'projects_by_region', '=', '{', '}', '# Project IDs by region', 'for', 'region', 'in', 'enabled_regions', ':', 'try', ':', 'project_input', '=', '{', '"name"', ':', '"Temporary build project for dx build global workflow"', ',', '"region"', ':', 'region', '}', 'if', 'args', '.', 'bill_to', ':', 'project_input', '[', '"billTo"', ']', '=', 'args', '.', 'bill_to', 'temp_project', '=', 'dxpy', '.', 'api', '.', 'project_new', '(', 'project_input', ')', '[', '"id"', ']', 'projects_by_region', '[', 'region', ']', '=', 'temp_project', 'logger', '.', 'debug', '(', '"Created temporary project {} to build in"', '.', 'format', '(', 'temp_project', ')', ')', 'except', ':', '# Clean up any temp projects that might have been created', 'if', 'projects_by_region', ':', 'dxpy', '.', 'executable_builder', '.', 'delete_temporary_projects', '(', 'projects_by_region', '.', 'values', '(', ')', ')', 'err_exit', '(', ')', 'return', 'projects_by_region'] | Creates a temporary project needed to build an underlying workflow
for a global workflow. Returns a dictionary with region names as keys
and project IDs as values
The regions in which projects will be created can be:
i. regions specified in dxworkflow.json "regionalOptions"
ii. regions specified as an argument to "dx build"
iii. current context project, if None of the above are set
If both args and dxworkflow.json specify regions, they must match. | ['Creates', 'a', 'temporary', 'project', 'needed', 'to', 'build', 'an', 'underlying', 'workflow', 'for', 'a', 'global', 'workflow', '.', 'Returns', 'a', 'dictionary', 'with', 'region', 'names', 'as', 'keys', 'and', 'project', 'IDs', 'as', 'values'] | train | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/workflow_builder.py#L413-L441 |
6,739 | elastic/elasticsearch-py | elasticsearch/client/cat.py | CatClient.allocation | def allocation(self, node_id=None, params=None):
"""
Allocation provides a snapshot of how shards have located around the
cluster and the state of disk usage.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html>`_
:arg node_id: A comma-separated list of node IDs or names to limit the
returned information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'allocation', node_id), params=params) | python | def allocation(self, node_id=None, params=None):
"""
Allocation provides a snapshot of how shards have located around the
cluster and the state of disk usage.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html>`_
:arg node_id: A comma-separated list of node IDs or names to limit the
returned information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'allocation', node_id), params=params) | ['def', 'allocation', '(', 'self', ',', 'node_id', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'return', 'self', '.', 'transport', '.', 'perform_request', '(', "'GET'", ',', '_make_path', '(', "'_cat'", ',', "'allocation'", ',', 'node_id', ')', ',', 'params', '=', 'params', ')'] | Allocation provides a snapshot of how shards have located around the
cluster and the state of disk usage.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html>`_
:arg node_id: A comma-separated list of node IDs or names to limit the
returned information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False | ['Allocation', 'provides', 'a', 'snapshot', 'of', 'how', 'shards', 'have', 'located', 'around', 'the', 'cluster', 'and', 'the', 'state', 'of', 'disk', 'usage', '.', '<https', ':', '//', 'www', '.', 'elastic', '.', 'co', '/', 'guide', '/', 'en', '/', 'elasticsearch', '/', 'reference', '/', 'current', '/', 'cat', '-', 'allocation', '.', 'html', '>', '_'] | train | https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/cat.py#L27-L49 |
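Typical calls for the cat API above (the keyword arguments map onto the query params listed in the docstring; assumes a reachable cluster):

from elasticsearch import Elasticsearch

es = Elasticsearch()                                   # assumes a node on localhost:9200
print(es.cat.allocation(v=True, bytes="mb"))           # disk usage and shard counts per node
print(es.cat.allocation(node_id="node-1", h="node,shards,disk.percent"))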
6,740 | bitesofcode/projexui | projexui/widgets/xlistwidget.py | XListWidget.resizeToContents | def resizeToContents(self):
"""
Resizes the list widget to fit its contents vertically.
"""
if self.count():
item = self.item(self.count() - 1)
rect = self.visualItemRect(item)
height = rect.bottom() + 8
height = max(28, height)
self.setFixedHeight(height)
else:
self.setFixedHeight(self.minimumHeight()) | python | def resizeToContents(self):
"""
Resizes the list widget to fit its contents vertically.
"""
if self.count():
item = self.item(self.count() - 1)
rect = self.visualItemRect(item)
height = rect.bottom() + 8
height = max(28, height)
self.setFixedHeight(height)
else:
self.setFixedHeight(self.minimumHeight()) | ['def', 'resizeToContents', '(', 'self', ')', ':', 'if', 'self', '.', 'count', '(', ')', ':', 'item', '=', 'self', '.', 'item', '(', 'self', '.', 'count', '(', ')', '-', '1', ')', 'rect', '=', 'self', '.', 'visualItemRect', '(', 'item', ')', 'height', '=', 'rect', '.', 'bottom', '(', ')', '+', '8', 'height', '=', 'max', '(', '28', ',', 'height', ')', 'self', '.', 'setFixedHeight', '(', 'height', ')', 'else', ':', 'self', '.', 'setFixedHeight', '(', 'self', '.', 'minimumHeight', '(', ')', ')'] | Resizes the list widget to fit its contents vertically. | ['Resizes', 'the', 'list', 'widget', 'to', 'fit', 'its', 'contents', 'vertically', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlistwidget.py#L535-L546 |
6,741 | Dallinger/Dallinger | dallinger/bots.py | BotBase.complete_experiment | def complete_experiment(self, status):
"""Sends worker status ('worker_complete' or 'worker_failed')
to the experiment server.
"""
url = self.driver.current_url
p = urllib.parse.urlparse(url)
complete_url = "%s://%s/%s?participant_id=%s"
complete_url = complete_url % (p.scheme, p.netloc, status, self.participant_id)
self.driver.get(complete_url)
logger.info("Forced call to %s: %s" % (status, complete_url)) | python | def complete_experiment(self, status):
"""Sends worker status ('worker_complete' or 'worker_failed')
to the experiment server.
"""
url = self.driver.current_url
p = urllib.parse.urlparse(url)
complete_url = "%s://%s/%s?participant_id=%s"
complete_url = complete_url % (p.scheme, p.netloc, status, self.participant_id)
self.driver.get(complete_url)
logger.info("Forced call to %s: %s" % (status, complete_url)) | ['def', 'complete_experiment', '(', 'self', ',', 'status', ')', ':', 'url', '=', 'self', '.', 'driver', '.', 'current_url', 'p', '=', 'urllib', '.', 'parse', '.', 'urlparse', '(', 'url', ')', 'complete_url', '=', '"%s://%s/%s?participant_id=%s"', 'complete_url', '=', 'complete_url', '%', '(', 'p', '.', 'scheme', ',', 'p', '.', 'netloc', ',', 'status', ',', 'self', '.', 'participant_id', ')', 'self', '.', 'driver', '.', 'get', '(', 'complete_url', ')', 'logger', '.', 'info', '(', '"Forced call to %s: %s"', '%', '(', 'status', ',', 'complete_url', ')', ')'] | Sends worker status ('worker_complete' or 'worker_failed')
to the experiment server. | ['Sends', 'worker', 'status', '(', 'worker_complete', 'or', 'worker_failed', ')', 'to', 'the', 'experiment', 'server', '.'] | train | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/bots.py#L175-L184 |
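A minimal illustrative call (assumes `bot` is a BotBase subclass instance that has already signed up and therefore holds a participant_id):

bot.complete_experiment("worker_complete")   # or "worker_failed" to mark the run as failed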
6,742 | GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | read_requirements | def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result | python | def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result | ['def', 'read_requirements', '(', 'req_file', ')', ':', 'items', '=', 'list', '(', 'parse_requirements', '(', 'req_file', ',', 'session', '=', '{', '}', ')', ')', 'result', '=', '[', ']', 'for', 'item', 'in', 'items', ':', '# Get line number from item', 'line_number', '=', 'item', '.', 'comes_from', '.', 'split', '(', 'req_file', '+', "' (line '", ')', '[', '1', ']', '[', ':', '-', '1', ']', 'if', 'item', '.', 'req', ':', 'item', '.', 'req', '.', 'marker', '=', 'item', '.', 'markers', 'result', '.', 'append', '(', '(', 'item', '.', 'req', ',', 'line_number', ')', ')', 'else', ':', 'result', '.', 'append', '(', '(', 'item', ',', 'line_number', ')', ')', 'return', 'result'] | Reads a requirements file.
Args:
req_file (str): Filename of requirements file | ['Reads', 'a', 'requirements', 'file', '.'] | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L38-L56 |
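Illustrative usage (assumes a requirements.txt in the working directory):

from gcp_devrel.tools import requirements

for req, line_number in requirements.read_requirements("requirements.txt"):
    print(line_number, req)        # e.g. "3 requests>=2.18.0"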
6,743 | SOBotics/pyRedunda | pyRedunda/Redunda.py | Redunda.uploadFiles | def uploadFiles(self):
"""
Uploads all the files in 'filesToSync'
"""
for each_file in self.filesToSync:
self.uploadFile(each_file["name"], each_file["ispickle"], each_file["at_home"]) | python | def uploadFiles(self):
"""
Uploads all the files in 'filesToSync'
"""
for each_file in self.filesToSync:
self.uploadFile(each_file["name"], each_file["ispickle"], each_file["at_home"]) | ['def', 'uploadFiles', '(', 'self', ')', ':', 'for', 'each_file', 'in', 'self', '.', 'filesToSync', ':', 'self', '.', 'uploadFile', '(', 'each_file', '[', '"name"', ']', ',', 'each_file', '[', '"ispickle"', ']', ',', 'each_file', '[', '"at_home"', ']', ')'] | Uploads all the files in 'filesToSync' | ['Uploads', 'all', 'the', 'files', 'in', 'filesToSync'] | train | https://github.com/SOBotics/pyRedunda/blob/4bd190dc908861c5fac4c9b60cf79eeb0e5c76ab/pyRedunda/Redunda.py#L141-L146 |
6,744 | opencobra/memote | memote/support/consistency.py | find_metabolites_not_produced_with_open_bounds | def find_metabolites_not_produced_with_open_bounds(model):
"""
Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced.
"""
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
solution = helpers.run_fba(model, exch.id)
if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
mets_not_produced.append(met)
return mets_not_produced | python | def find_metabolites_not_produced_with_open_bounds(model):
"""
Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced.
"""
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
solution = helpers.run_fba(model, exch.id)
if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
mets_not_produced.append(met)
return mets_not_produced | ['def', 'find_metabolites_not_produced_with_open_bounds', '(', 'model', ')', ':', 'mets_not_produced', '=', 'list', '(', ')', 'helpers', '.', 'open_exchanges', '(', 'model', ')', 'for', 'met', 'in', 'model', '.', 'metabolites', ':', 'with', 'model', ':', 'exch', '=', 'model', '.', 'add_boundary', '(', 'met', ',', 'type', '=', '"irrex"', ',', 'reaction_id', '=', '"IRREX"', ',', 'lb', '=', '0', ',', 'ub', '=', '1000', ')', 'solution', '=', 'helpers', '.', 'run_fba', '(', 'model', ',', 'exch', '.', 'id', ')', 'if', 'np', '.', 'isnan', '(', 'solution', ')', 'or', 'solution', '<', 'TOLERANCE_THRESHOLD', ':', 'mets_not_produced', '.', 'append', '(', 'met', ')', 'return', 'mets_not_produced'] | Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
Those metabolites that could not be produced. | ['Return', 'metabolites', 'that', 'cannot', 'be', 'produced', 'with', 'open', 'exchange', 'reactions', '.'] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency.py#L493-L520 |
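A hedged usage sketch (cobrapy's bundled textbook model is assumed to be available; any cobra.Model works):

import cobra.test
from memote.support.consistency import find_metabolites_not_produced_with_open_bounds

model = cobra.test.create_test_model("textbook")
blocked = find_metabolites_not_produced_with_open_bounds(model)
print(len(blocked), "metabolites cannot be produced even with open exchanges")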
6,745 | ic-labs/django-icekit | icekit/publishing/models.py | PublishingModel.clone_parler_translations | def clone_parler_translations(self, dst_obj):
"""
Clone each of the translations from an object and relate
them to another.
:param self: The object to get the translations from.
:param dst_obj: The object to relate the new translations to.
:return: None
"""
# Find all django-parler translation attributes on model
translation_attrs = []
if hasattr(self, '_parler_meta'):
for parler_meta in self._parler_meta:
translation_attrs.append(parler_meta.rel_name)
# Clone all django-parler translations via attributes
for translation_attr in translation_attrs:
# Clear any translations already cloned to published object
# before we get here, which seems to happen via deepcopy()
# sometimes.
setattr(dst_obj, translation_attr, [])
# Clone attribute's translations from source to destination
for translation in getattr(self, translation_attr).all():
translation.pk = None
translation.master = dst_obj
translation.save() | python | def clone_parler_translations(self, dst_obj):
"""
Clone each of the translations from an object and relate
them to another.
:param self: The object to get the translations from.
:param dst_obj: The object to relate the new translations to.
:return: None
"""
# Find all django-parler translation attributes on model
translation_attrs = []
if hasattr(self, '_parler_meta'):
for parler_meta in self._parler_meta:
translation_attrs.append(parler_meta.rel_name)
# Clone all django-parler translations via attributes
for translation_attr in translation_attrs:
# Clear any translations already cloned to published object
# before we get here, which seems to happen via deepcopy()
# sometimes.
setattr(dst_obj, translation_attr, [])
# Clone attribute's translations from source to destination
for translation in getattr(self, translation_attr).all():
translation.pk = None
translation.master = dst_obj
translation.save() | ['def', 'clone_parler_translations', '(', 'self', ',', 'dst_obj', ')', ':', '# Find all django-parler translation attributes on model', 'translation_attrs', '=', '[', ']', 'if', 'hasattr', '(', 'self', ',', "'_parler_meta'", ')', ':', 'for', 'parler_meta', 'in', 'self', '.', '_parler_meta', ':', 'translation_attrs', '.', 'append', '(', 'parler_meta', '.', 'rel_name', ')', '# Clone all django-parler translations via attributes', 'for', 'translation_attr', 'in', 'translation_attrs', ':', '# Clear any translations already cloned to published object', '# before we get here, which seems to happen via deepcopy()', '# sometimes.', 'setattr', '(', 'dst_obj', ',', 'translation_attr', ',', '[', ']', ')', "# Clone attribute's translations from source to destination", 'for', 'translation', 'in', 'getattr', '(', 'self', ',', 'translation_attr', ')', '.', 'all', '(', ')', ':', 'translation', '.', 'pk', '=', 'None', 'translation', '.', 'master', '=', 'dst_obj', 'translation', '.', 'save', '(', ')'] | Clone each of the translations from an object and relate
them to another.
:param self: The object to get the translations from.
:param dst_obj: The object to relate the new translations to.
:return: None | ['Clone', 'each', 'of', 'the', 'translations', 'from', 'an', 'object', 'and', 'relate', 'them', 'to', 'another', '.', ':', 'param', 'self', ':', 'The', 'object', 'to', 'get', 'the', 'translations', 'from', '.', ':', 'param', 'dst_obj', ':', 'The', 'object', 'to', 'relate', 'the', 'new', 'translations', 'to', '.', ':', 'return', ':', 'None'] | train | https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/models.py#L570-L593 |
6,746 | Microsoft/nni | examples/trials/ga_squad/graph.py | Graph.mutation | def mutation(self, only_add=False):
'''
Mutation for a graph
'''
types = []
if self.layer_num() < self.max_layer_num:
types.append(0)
types.append(1)
if self.layer_num() > 5 and only_add is False:
types.append(2)
types.append(3)
# 0 : add a layer , delete a edge
# 1 : add a layer , change a edge
# 2 : delete a layer, delete a edge
# 3 : delete a layer, change a edge
graph_type = random.choice(types)
layer_type = random.choice([LayerType.attention.value,\
LayerType.self_attention.value, LayerType.rnn.value])
layers = copy.deepcopy(self.layers)
cnt_try = 0
while True:
layers_in = []
layers_out = []
layers_del = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
if layer.graph_type != LayerType.output.value:
layers_in.append(i)
if layer.graph_type != LayerType.input.value:
layers_out.append(i)
if layer.graph_type != LayerType.output.value\
and layer.graph_type != LayerType.input.value:
layers_del.append(i)
if graph_type <= 1:
new_id = len(layers)
out = random.choice(layers_out)
inputs = []
output = [out]
pos = random.randint(0, len(layers[out].input) - 1)
last_in = layers[out].input[pos]
layers[out].input[pos] = new_id
if graph_type == 0:
layers[last_in].output.remove(out)
if graph_type == 1:
layers[last_in].output.remove(out)
layers[last_in].output.append(new_id)
inputs = [last_in]
lay = Layer(graph_type=layer_type, inputs=inputs, output=output)
while len(inputs) < lay.input_size:
layer1 = random.choice(layers_in)
inputs.append(layer1)
layers[layer1].output.append(new_id)
lay.input = inputs
layers.append(lay)
else:
layer1 = random.choice(layers_del)
for layer2 in layers[layer1].output:
layers[layer2].input.remove(layer1)
if graph_type == 2:
random_in = random.choice(layers_in)
else:
random_in = random.choice(layers[layer1].input)
layers[layer2].input.append(random_in)
layers[random_in].output.append(layer2)
for layer2 in layers[layer1].input:
layers[layer2].output.remove(layer1)
layers[layer1].is_delete = True
if self.is_legal(layers):
self.layers = layers
break
else:
layers = copy.deepcopy(self.layers)
cnt_try += 1 | python | def mutation(self, only_add=False):
'''
Mutation for a graph
'''
types = []
if self.layer_num() < self.max_layer_num:
types.append(0)
types.append(1)
if self.layer_num() > 5 and only_add is False:
types.append(2)
types.append(3)
# 0 : add a layer , delete a edge
# 1 : add a layer , change a edge
# 2 : delete a layer, delete a edge
# 3 : delete a layer, change a edge
graph_type = random.choice(types)
layer_type = random.choice([LayerType.attention.value,\
LayerType.self_attention.value, LayerType.rnn.value])
layers = copy.deepcopy(self.layers)
cnt_try = 0
while True:
layers_in = []
layers_out = []
layers_del = []
for i, layer in enumerate(layers):
if layer.is_delete is False:
if layer.graph_type != LayerType.output.value:
layers_in.append(i)
if layer.graph_type != LayerType.input.value:
layers_out.append(i)
if layer.graph_type != LayerType.output.value\
and layer.graph_type != LayerType.input.value:
layers_del.append(i)
if graph_type <= 1:
new_id = len(layers)
out = random.choice(layers_out)
inputs = []
output = [out]
pos = random.randint(0, len(layers[out].input) - 1)
last_in = layers[out].input[pos]
layers[out].input[pos] = new_id
if graph_type == 0:
layers[last_in].output.remove(out)
if graph_type == 1:
layers[last_in].output.remove(out)
layers[last_in].output.append(new_id)
inputs = [last_in]
lay = Layer(graph_type=layer_type, inputs=inputs, output=output)
while len(inputs) < lay.input_size:
layer1 = random.choice(layers_in)
inputs.append(layer1)
layers[layer1].output.append(new_id)
lay.input = inputs
layers.append(lay)
else:
layer1 = random.choice(layers_del)
for layer2 in layers[layer1].output:
layers[layer2].input.remove(layer1)
if graph_type == 2:
random_in = random.choice(layers_in)
else:
random_in = random.choice(layers[layer1].input)
layers[layer2].input.append(random_in)
layers[random_in].output.append(layer2)
for layer2 in layers[layer1].input:
layers[layer2].output.remove(layer1)
layers[layer1].is_delete = True
if self.is_legal(layers):
self.layers = layers
break
else:
layers = copy.deepcopy(self.layers)
cnt_try += 1 | ['def', 'mutation', '(', 'self', ',', 'only_add', '=', 'False', ')', ':', 'types', '=', '[', ']', 'if', 'self', '.', 'layer_num', '(', ')', '<', 'self', '.', 'max_layer_num', ':', 'types', '.', 'append', '(', '0', ')', 'types', '.', 'append', '(', '1', ')', 'if', 'self', '.', 'layer_num', '(', ')', '>', '5', 'and', 'only_add', 'is', 'False', ':', 'types', '.', 'append', '(', '2', ')', 'types', '.', 'append', '(', '3', ')', '# 0 : add a layer , delete a edge', '# 1 : add a layer , change a edge', '# 2 : delete a layer, delete a edge', '# 3 : delete a layer, change a edge', 'graph_type', '=', 'random', '.', 'choice', '(', 'types', ')', 'layer_type', '=', 'random', '.', 'choice', '(', '[', 'LayerType', '.', 'attention', '.', 'value', ',', 'LayerType', '.', 'self_attention', '.', 'value', ',', 'LayerType', '.', 'rnn', '.', 'value', ']', ')', 'layers', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'layers', ')', 'cnt_try', '=', '0', 'while', 'True', ':', 'layers_in', '=', '[', ']', 'layers_out', '=', '[', ']', 'layers_del', '=', '[', ']', 'for', 'i', ',', 'layer', 'in', 'enumerate', '(', 'layers', ')', ':', 'if', 'layer', '.', 'is_delete', 'is', 'False', ':', 'if', 'layer', '.', 'graph_type', '!=', 'LayerType', '.', 'output', '.', 'value', ':', 'layers_in', '.', 'append', '(', 'i', ')', 'if', 'layer', '.', 'graph_type', '!=', 'LayerType', '.', 'input', '.', 'value', ':', 'layers_out', '.', 'append', '(', 'i', ')', 'if', 'layer', '.', 'graph_type', '!=', 'LayerType', '.', 'output', '.', 'value', 'and', 'layer', '.', 'graph_type', '!=', 'LayerType', '.', 'input', '.', 'value', ':', 'layers_del', '.', 'append', '(', 'i', ')', 'if', 'graph_type', '<=', '1', ':', 'new_id', '=', 'len', '(', 'layers', ')', 'out', '=', 'random', '.', 'choice', '(', 'layers_out', ')', 'inputs', '=', '[', ']', 'output', '=', '[', 'out', ']', 'pos', '=', 'random', '.', 'randint', '(', '0', ',', 'len', '(', 'layers', '[', 'out', ']', '.', 'input', ')', '-', '1', ')', 'last_in', '=', 'layers', '[', 'out', ']', '.', 'input', '[', 'pos', ']', 'layers', '[', 'out', ']', '.', 'input', '[', 'pos', ']', '=', 'new_id', 'if', 'graph_type', '==', '0', ':', 'layers', '[', 'last_in', ']', '.', 'output', '.', 'remove', '(', 'out', ')', 'if', 'graph_type', '==', '1', ':', 'layers', '[', 'last_in', ']', '.', 'output', '.', 'remove', '(', 'out', ')', 'layers', '[', 'last_in', ']', '.', 'output', '.', 'append', '(', 'new_id', ')', 'inputs', '=', '[', 'last_in', ']', 'lay', '=', 'Layer', '(', 'graph_type', '=', 'layer_type', ',', 'inputs', '=', 'inputs', ',', 'output', '=', 'output', ')', 'while', 'len', '(', 'inputs', ')', '<', 'lay', '.', 'input_size', ':', 'layer1', '=', 'random', '.', 'choice', '(', 'layers_in', ')', 'inputs', '.', 'append', '(', 'layer1', ')', 'layers', '[', 'layer1', ']', '.', 'output', '.', 'append', '(', 'new_id', ')', 'lay', '.', 'input', '=', 'inputs', 'layers', '.', 'append', '(', 'lay', ')', 'else', ':', 'layer1', '=', 'random', '.', 'choice', '(', 'layers_del', ')', 'for', 'layer2', 'in', 'layers', '[', 'layer1', ']', '.', 'output', ':', 'layers', '[', 'layer2', ']', '.', 'input', '.', 'remove', '(', 'layer1', ')', 'if', 'graph_type', '==', '2', ':', 'random_in', '=', 'random', '.', 'choice', '(', 'layers_in', ')', 'else', ':', 'random_in', '=', 'random', '.', 'choice', '(', 'layers', '[', 'layer1', ']', '.', 'input', ')', 'layers', '[', 'layer2', ']', '.', 'input', '.', 'append', '(', 'random_in', ')', 'layers', '[', 'random_in', ']', '.', 'output', '.', 'append', '(', 'layer2', ')', 'for', 
'layer2', 'in', 'layers', '[', 'layer1', ']', '.', 'input', ':', 'layers', '[', 'layer2', ']', '.', 'output', '.', 'remove', '(', 'layer1', ')', 'layers', '[', 'layer1', ']', '.', 'is_delete', '=', 'True', 'if', 'self', '.', 'is_legal', '(', 'layers', ')', ':', 'self', '.', 'layers', '=', 'layers', 'break', 'else', ':', 'layers', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'layers', ')', 'cnt_try', '+=', '1'] | Mutation for a graph | ['Mutation', 'for', 'a', 'graph'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/ga_squad/graph.py#L207-L280 |
6,747 | OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.ring_position | def ring_position(self):
"""The current position of the ring, in degrees
counterclockwise from the northern-most point of the ring in
the tablet's current logical orientation.
If the source is
:attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`,
libinput sends a terminating event with a ring value of -1 when
the finger is lifted from the ring. A caller may use this information
to e.g. determine if kinetic scrolling should be triggered.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property
raises :exc:`AttributeError`.
Returns:
float: The current value of the axis. -1 if the finger was
lifted.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_RING:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_ring_position(
self._handle) | python | def ring_position(self):
"""The current position of the ring, in degrees
counterclockwise from the northern-most point of the ring in
the tablet's current logical orientation.
If the source is
:attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`,
libinput sends a terminating event with a ring value of -1 when
the finger is lifted from the ring. A caller may use this information
to e.g. determine if kinetic scrolling should be triggered.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property
raises :exc:`AttributeError`.
Returns:
float: The current value of the axis. -1 if the finger was
lifted.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_RING:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_ring_position(
self._handle) | ['def', 'ring_position', '(', 'self', ')', ':', 'if', 'self', '.', 'type', '!=', 'EventType', '.', 'TABLET_PAD_RING', ':', 'raise', 'AttributeError', '(', '_wrong_prop', '.', 'format', '(', 'self', '.', 'type', ')', ')', 'return', 'self', '.', '_libinput', '.', 'libinput_event_tablet_pad_get_ring_position', '(', 'self', '.', '_handle', ')'] | The current position of the ring, in degrees
counterclockwise from the northern-most point of the ring in
the tablet's current logical orientation.
If the source is
:attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`,
libinput sends a terminating event with a ring value of -1 when
the finger is lifted from the ring. A caller may use this information
to e.g. determine if kinetic scrolling should be triggered.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property
raises :exc:`AttributeError`.
Returns:
float: The current value of the axis. -1 if the finger was
lifted.
Raises:
AttributeError | ['The', 'current', 'position', 'of', 'the', 'ring', 'in', 'degrees', 'counterclockwise', 'from', 'the', 'northern', '-', 'most', 'point', 'of', 'the', 'ring', 'in', 'the', 'tablet', 's', 'current', 'logical', 'orientation', '.'] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1445-L1470 |
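An illustrative guard around the property above (a sketch; `event` is assumed to be a TabletPadEvent coming out of a libinput event loop):

from libinput.constant import EventType

if event.type == EventType.TABLET_PAD_RING:
    position = event.ring_position      # degrees counterclockwise, or -1 once the finger lifts
    if position < 0:
        stop_kinetic_scroll()           # hypothetical handler for the terminating event
    else:
        scroll_to(position)             # hypothetical handler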
6,748 | cackharot/suds-py3 | suds/bindings/binding.py | Binding.get_fault | def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True,
an exception is raised. Otherwise, the I{unmarshalled} fault L{Object}
is returned. This method is called when the server raises a
I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail) | python | def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True,
an exception is raised. Otherwise, the I{unmarshalled} fault L{Object}
is returned. This method is called when the server raises a
I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail) | ['def', 'get_fault', '(', 'self', ',', 'reply', ')', ':', 'reply', '=', 'self', '.', 'replyfilter', '(', 'reply', ')', 'sax', '=', 'Parser', '(', ')', 'faultroot', '=', 'sax', '.', 'parse', '(', 'string', '=', 'reply', ')', 'soapenv', '=', 'faultroot', '.', 'getChild', '(', "'Envelope'", ')', 'soapbody', '=', 'soapenv', '.', 'getChild', '(', "'Body'", ')', 'fault', '=', 'soapbody', '.', 'getChild', '(', "'Fault'", ')', 'unmarshaller', '=', 'self', '.', 'unmarshaller', '(', 'False', ')', 'p', '=', 'unmarshaller', '.', 'process', '(', 'fault', ')', 'if', 'self', '.', 'options', '(', ')', '.', 'faults', ':', 'raise', 'WebFault', '(', 'p', ',', 'faultroot', ')', 'return', '(', 'faultroot', ',', 'p', '.', 'detail', ')'] | Extract the fault from the specified soap reply. If I{faults} is True,
an exception is raised. Otherwise, the I{unmarshalled} fault L{Object}
is returned. This method is called when the server raises a
I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} ) | ['Extract', 'the', 'fault', 'from', 'the', 'specified', 'soap', 'reply', '.', 'If', 'I', '{', 'faults', '}', 'is', 'True', 'an', 'exception', 'is', 'raised', '.', 'Otherwise', 'the', 'I', '{', 'unmarshalled', '}', 'fault', 'L', '{', 'Object', '}', 'is', 'returned', '.', 'This', 'method', 'is', 'called', 'when', 'the', 'server', 'raises', 'a', 'I', '{', 'web', 'fault', '}', '.'] | train | https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/bindings/binding.py#L247-L268 |
6,749 | DataDog/integrations-core | redisdb/datadog_checks/redisdb/redisdb.py | Redis._check_command_stats | def _check_command_stats(self, conn, tags):
"""Get command-specific statistics from redis' INFO COMMANDSTATS command
"""
try:
command_stats = conn.info("commandstats")
except Exception:
self.warning('Could not retrieve command stats from Redis. INFO COMMANDSTATS only works with Redis >= 2.6.')
return
for key, stats in iteritems(command_stats):
command = key.split('_', 1)[1]
command_tags = tags + ['command:{}'.format(command)]
# When `host:` is passed as a command, `calls` ends up having a leading `:`
# see https://github.com/DataDog/integrations-core/issues/839
calls = stats.get('calls') if command != 'host' else stats.get(':calls')
self.gauge('redis.command.calls', calls, tags=command_tags)
self.gauge('redis.command.usec_per_call', stats['usec_per_call'], tags=command_tags) | python | def _check_command_stats(self, conn, tags):
"""Get command-specific statistics from redis' INFO COMMANDSTATS command
"""
try:
command_stats = conn.info("commandstats")
except Exception:
self.warning('Could not retrieve command stats from Redis. INFO COMMANDSTATS only works with Redis >= 2.6.')
return
for key, stats in iteritems(command_stats):
command = key.split('_', 1)[1]
command_tags = tags + ['command:{}'.format(command)]
# When `host:` is passed as a command, `calls` ends up having a leading `:`
# see https://github.com/DataDog/integrations-core/issues/839
calls = stats.get('calls') if command != 'host' else stats.get(':calls')
self.gauge('redis.command.calls', calls, tags=command_tags)
self.gauge('redis.command.usec_per_call', stats['usec_per_call'], tags=command_tags) | ['def', '_check_command_stats', '(', 'self', ',', 'conn', ',', 'tags', ')', ':', 'try', ':', 'command_stats', '=', 'conn', '.', 'info', '(', '"commandstats"', ')', 'except', 'Exception', ':', 'self', '.', 'warning', '(', "'Could not retrieve command stats from Redis. INFO COMMANDSTATS only works with Redis >= 2.6.'", ')', 'return', 'for', 'key', ',', 'stats', 'in', 'iteritems', '(', 'command_stats', ')', ':', 'command', '=', 'key', '.', 'split', '(', "'_'", ',', '1', ')', '[', '1', ']', 'command_tags', '=', 'tags', '+', '[', "'command:{}'", '.', 'format', '(', 'command', ')', ']', '# When `host:` is passed as a command, `calls` ends up having a leading `:`', '# see https://github.com/DataDog/integrations-core/issues/839', 'calls', '=', 'stats', '.', 'get', '(', "'calls'", ')', 'if', 'command', '!=', "'host'", 'else', 'stats', '.', 'get', '(', "':calls'", ')', 'self', '.', 'gauge', '(', "'redis.command.calls'", ',', 'calls', ',', 'tags', '=', 'command_tags', ')', 'self', '.', 'gauge', '(', "'redis.command.usec_per_call'", ',', 'stats', '[', "'usec_per_call'", ']', ',', 'tags', '=', 'command_tags', ')'] | Get command-specific statistics from redis' INFO COMMANDSTATS command | ['Get', 'command', '-', 'specific', 'statistics', 'from', 'redis', 'INFO', 'COMMANDSTATS', 'command'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/redisdb/datadog_checks/redisdb/redisdb.py#L426-L444 |
6,750 | nion-software/nionswift | nion/swift/FilterPanel.py | TreeNode.insert_value | def insert_value(self, keys, value):
"""
Insert a value (data item) into this tree node and then its
children. This will be called in response to a new data item being
inserted into the document. Also updates the tree node's cumulative
child count.
"""
self.count += 1
if not self.key:
self.__value_reverse_mapping[value] = keys
if len(keys) == 0:
self.values.append(value)
else:
key = keys[0]
index = bisect.bisect_left(self.children, TreeNode(key, reversed=self.reversed))
if index == len(self.children) or self.children[index].key != key:
new_tree_node = TreeNode(key, list(), reversed=self.reversed)
new_tree_node.child_inserted = self.child_inserted
new_tree_node.child_removed = self.child_removed
new_tree_node.tree_node_updated = self.tree_node_updated
new_tree_node.__set_parent(self)
self.children.insert(index, new_tree_node)
if self.child_inserted:
self.child_inserted(self, index, new_tree_node)
child = self.children[index]
child.insert_value(keys[1:], value)
if self.tree_node_updated:
self.tree_node_updated(child) | python | def insert_value(self, keys, value):
"""
Insert a value (data item) into this tree node and then its
children. This will be called in response to a new data item being
inserted into the document. Also updates the tree node's cumulative
child count.
"""
self.count += 1
if not self.key:
self.__value_reverse_mapping[value] = keys
if len(keys) == 0:
self.values.append(value)
else:
key = keys[0]
index = bisect.bisect_left(self.children, TreeNode(key, reversed=self.reversed))
if index == len(self.children) or self.children[index].key != key:
new_tree_node = TreeNode(key, list(), reversed=self.reversed)
new_tree_node.child_inserted = self.child_inserted
new_tree_node.child_removed = self.child_removed
new_tree_node.tree_node_updated = self.tree_node_updated
new_tree_node.__set_parent(self)
self.children.insert(index, new_tree_node)
if self.child_inserted:
self.child_inserted(self, index, new_tree_node)
child = self.children[index]
child.insert_value(keys[1:], value)
if self.tree_node_updated:
self.tree_node_updated(child) | ['def', 'insert_value', '(', 'self', ',', 'keys', ',', 'value', ')', ':', 'self', '.', 'count', '+=', '1', 'if', 'not', 'self', '.', 'key', ':', 'self', '.', '__value_reverse_mapping', '[', 'value', ']', '=', 'keys', 'if', 'len', '(', 'keys', ')', '==', '0', ':', 'self', '.', 'values', '.', 'append', '(', 'value', ')', 'else', ':', 'key', '=', 'keys', '[', '0', ']', 'index', '=', 'bisect', '.', 'bisect_left', '(', 'self', '.', 'children', ',', 'TreeNode', '(', 'key', ',', 'reversed', '=', 'self', '.', 'reversed', ')', ')', 'if', 'index', '==', 'len', '(', 'self', '.', 'children', ')', 'or', 'self', '.', 'children', '[', 'index', ']', '.', 'key', '!=', 'key', ':', 'new_tree_node', '=', 'TreeNode', '(', 'key', ',', 'list', '(', ')', ',', 'reversed', '=', 'self', '.', 'reversed', ')', 'new_tree_node', '.', 'child_inserted', '=', 'self', '.', 'child_inserted', 'new_tree_node', '.', 'child_removed', '=', 'self', '.', 'child_removed', 'new_tree_node', '.', 'tree_node_updated', '=', 'self', '.', 'tree_node_updated', 'new_tree_node', '.', '__set_parent', '(', 'self', ')', 'self', '.', 'children', '.', 'insert', '(', 'index', ',', 'new_tree_node', ')', 'if', 'self', '.', 'child_inserted', ':', 'self', '.', 'child_inserted', '(', 'self', ',', 'index', ',', 'new_tree_node', ')', 'child', '=', 'self', '.', 'children', '[', 'index', ']', 'child', '.', 'insert_value', '(', 'keys', '[', '1', ':', ']', ',', 'value', ')', 'if', 'self', '.', 'tree_node_updated', ':', 'self', '.', 'tree_node_updated', '(', 'child', ')'] | Insert a value (data item) into this tree node and then its
children. This will be called in response to a new data item being
inserted into the document. Also updates the tree node's cumulative
child count. | ['Insert', 'a', 'value', '(', 'data', 'item', ')', 'into', 'this', 'tree', 'node', 'and', 'then', 'its', 'children', '.', 'This', 'will', 'be', 'called', 'in', 'response', 'to', 'a', 'new', 'data', 'item', 'being', 'inserted', 'into', 'the', 'document', '.', 'Also', 'updates', 'the', 'tree', 'node', 's', 'cumulative', 'child', 'count', '.'] | train | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/FilterPanel.py#L358-L385 |
6,751 | mitsei/dlkit | dlkit/json_/learning/sessions.py | ActivityObjectiveBankAssignmentSession.unassign_activity_from_objective_bank | def unassign_activity_from_objective_bank(self, activity_id, objective_bank_id):
"""Removes a ``Activity`` from a ``ObjectiveBank``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: NotFound - ``activity_id`` or ``objective_bank_id`` not
found or ``activity_id`` not mapped to
``objective_bank_id``
raise: NullArgument - ``activity_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._unassign_object_from_catalog(activity_id, objective_bank_id) | python | def unassign_activity_from_objective_bank(self, activity_id, objective_bank_id):
"""Removes a ``Activity`` from a ``ObjectiveBank``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: NotFound - ``activity_id`` or ``objective_bank_id`` not
found or ``activity_id`` not mapped to
``objective_bank_id``
raise: NullArgument - ``activity_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._unassign_object_from_catalog(activity_id, objective_bank_id) | ['def', 'unassign_activity_from_objective_bank', '(', 'self', ',', 'activity_id', ',', 'objective_bank_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'LEARNING'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_objective_bank_lookup_session', '(', 'proxy', '=', 'self', '.', '_proxy', ')', 'lookup_session', '.', 'get_objective_bank', '(', 'objective_bank_id', ')', '# to raise NotFound', 'self', '.', '_unassign_object_from_catalog', '(', 'activity_id', ',', 'objective_bank_id', ')'] | Removes a ``Activity`` from a ``ObjectiveBank``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: NotFound - ``activity_id`` or ``objective_bank_id`` not
found or ``activity_id`` not mapped to
``objective_bank_id``
raise: NullArgument - ``activity_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Removes', 'a', 'Activity', 'from', 'a', 'ObjectiveBank', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L3752-L3773 |
6,752 | thespacedoctor/rockAtlas | rockAtlas/phot/dophotMatch.py | dophotMatch._select_exposures_requiring_dophot_extraction | def _select_exposures_requiring_dophot_extraction(
self,
batch=10):
"""* select exposures requiring dophot extraction*
**Key Arguments:**
- ``batch`` -- the batch size of dophot file to process
**Return:**
- ``expnames`` -- the names of the exposures in the batch
- ``remaining`` -- the number of exposures remaining that require orbfit/dophot crossmatching
"""
self.log.info(
'starting the ``_select_exposures_requiring_dophot_extraction`` method')
sqlQuery = u"""
select expname, floor(mjd) as mjd from atlas_exposures where local_data = 1 and dophot_match = 0 and orbfit_positions = 1;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
quiet=False
)
remaining = len(rows)
expnames = []
expnames[:] = [(r["expname"], int(r["mjd"])) for r in rows[:batch]]
self.log.info(
'completed the ``_select_exposures_requiring_dophot_extraction`` method')
return expnames, remaining | python | def _select_exposures_requiring_dophot_extraction(
self,
batch=10):
"""* select exposures requiring dophot extraction*
**Key Arguments:**
- ``batch`` -- the batch size of dophot file to process
**Return:**
- ``expnames`` -- the names of the exposures in the batch
- ``remaining`` -- the number of exposures remaining that require orbfit/dophot crossmatching
"""
self.log.info(
'starting the ``_select_exposures_requiring_dophot_extraction`` method')
sqlQuery = u"""
select expname, floor(mjd) as mjd from atlas_exposures where local_data = 1 and dophot_match = 0 and orbfit_positions = 1;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlasMoversDBConn,
quiet=False
)
remaining = len(rows)
expnames = []
expnames[:] = [(r["expname"], int(r["mjd"])) for r in rows[:batch]]
self.log.info(
'completed the ``_select_exposures_requiring_dophot_extraction`` method')
return expnames, remaining | ['def', '_select_exposures_requiring_dophot_extraction', '(', 'self', ',', 'batch', '=', '10', ')', ':', 'self', '.', 'log', '.', 'info', '(', "'starting the ``_select_exposures_requiring_dophot_extraction`` method'", ')', 'sqlQuery', '=', 'u"""\n select expname, floor(mjd) as mjd from atlas_exposures where local_data = 1 and dophot_match = 0 and orbfit_positions = 1;\n """', '%', 'locals', '(', ')', 'rows', '=', 'readquery', '(', 'log', '=', 'self', '.', 'log', ',', 'sqlQuery', '=', 'sqlQuery', ',', 'dbConn', '=', 'self', '.', 'atlasMoversDBConn', ',', 'quiet', '=', 'False', ')', 'remaining', '=', 'len', '(', 'rows', ')', 'expnames', '=', '[', ']', 'expnames', '[', ':', ']', '=', '[', '(', 'r', '[', '"expname"', ']', ',', 'int', '(', 'r', '[', '"mjd"', ']', ')', ')', 'for', 'r', 'in', 'rows', '[', ':', 'batch', ']', ']', 'self', '.', 'log', '.', 'info', '(', "'completed the ``_select_exposures_requiring_dophot_extraction`` method'", ')', 'return', 'expnames', ',', 'remaining'] | * select exposures requiring dophot extraction*
**Key Arguments:**
- ``batch`` -- the batch size of dophot file to process
**Return:**
- ``expnames`` -- the names of the exposures in the batch
- ``remaining`` -- the number of exposures remaining that require orbfit/dophot crossmatching | ['*', 'select', 'exposures', 'requiring', 'dophot', 'extraction', '*'] | train | https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/phot/dophotMatch.py#L119-L151 |
6,753 | tino/pyFirmata | pyfirmata/util.py | str_to_two_byte_iter | def str_to_two_byte_iter(string):
"""
Return a iter consisting of two byte chars from a string.
"""
bstring = string.encode()
bytes = bytearray()
for char in bstring:
bytes.append(char)
bytes.append(0)
return bytes | python | def str_to_two_byte_iter(string):
"""
Return a iter consisting of two byte chars from a string.
"""
bstring = string.encode()
bytes = bytearray()
for char in bstring:
bytes.append(char)
bytes.append(0)
return bytes | ['def', 'str_to_two_byte_iter', '(', 'string', ')', ':', 'bstring', '=', 'string', '.', 'encode', '(', ')', 'bytes', '=', 'bytearray', '(', ')', 'for', 'char', 'in', 'bstring', ':', 'bytes', '.', 'append', '(', 'char', ')', 'bytes', '.', 'append', '(', '0', ')', 'return', 'bytes'] | Return a iter consisting of two byte chars from a string. | ['Return', 'a', 'iter', 'consisting', 'of', 'two', 'byte', 'chars', 'from', 'a', 'string', '.'] | train | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L124-L133 |
6,754 | ArchiveTeam/wpull | wpull/protocol/http/robots.py | RobotsTxtChecker.can_fetch | def can_fetch(self, request: Request, file=None) -> bool:
'''Return whether the request can fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine.
'''
try:
return self.can_fetch_pool(request)
except NotInPoolError:
pass
yield from self.fetch_robots_txt(request, file=file)
return self.can_fetch_pool(request) | python | def can_fetch(self, request: Request, file=None) -> bool:
'''Return whether the request can fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine.
'''
try:
return self.can_fetch_pool(request)
except NotInPoolError:
pass
yield from self.fetch_robots_txt(request, file=file)
return self.can_fetch_pool(request) | ['def', 'can_fetch', '(', 'self', ',', 'request', ':', 'Request', ',', 'file', '=', 'None', ')', '->', 'bool', ':', 'try', ':', 'return', 'self', '.', 'can_fetch_pool', '(', 'request', ')', 'except', 'NotInPoolError', ':', 'pass', 'yield', 'from', 'self', '.', 'fetch_robots_txt', '(', 'request', ',', 'file', '=', 'file', ')', 'return', 'self', '.', 'can_fetch_pool', '(', 'request', ')'] | Return whether the request can fetched.
Args:
request: Request.
file: A file object to where the robots.txt contents are written.
Coroutine. | ['Return', 'whether', 'the', 'request', 'can', 'fetched', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/http/robots.py#L99-L115 |
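A hedged sketch of driving the coroutine above (assumes an asyncio event loop and an already-configured RobotsTxtChecker named `checker`; the Request import path is an assumption, and the old-style `yield from` form matches the code):

import asyncio
from wpull.protocol.http.request import Request   # assumed import path

@asyncio.coroutine
def check(url):
    allowed = yield from checker.can_fetch(Request(url))
    return allowed

loop = asyncio.get_event_loop()
print(loop.run_until_complete(check('http://example.com/page')))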
6,755 | litl/backoff | backoff/_decorator.py | on_predicate | def on_predicate(wait_gen,
predicate=operator.not_,
max_tries=None,
max_time=None,
jitter=full_jitter,
on_success=None,
on_backoff=None,
on_giveup=None,
logger='backoff',
**wait_gen_kwargs):
"""Returns decorator for backoff and retry triggered by predicate.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
predicate: A function which when called on the return value of
the target function will trigger backoff when considered
truthily. If not specified, the default behavior is to
backoff on falsey return values.
max_tries: The maximum number of attempts to make before giving
up. In the case of failure, the result of the last attempt
will be returned. The default value of None means there
is no limit to the number of tries. If a callable is passed,
it will be evaluated at runtime and its return value used.
max_time: The maximum total amount of time to try for before
giving up. If this time expires, the result of the last
attempt will be returned. If a callable is passed, it will
be evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
logger: Name of logger or Logger object to log to. Defaults to
'backoff'.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration.
"""
def decorate(target):
# change names because python 2.x doesn't have nonlocal
logger_ = logger
if isinstance(logger_, basestring):
logger_ = logging.getLogger(logger_)
on_success_ = _config_handlers(on_success)
on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
retry = None
if sys.version_info >= (3, 5): # pragma: python=3.5
import asyncio
if asyncio.iscoroutinefunction(target):
import backoff._async
retry = backoff._async.retry_predicate
elif _is_event_loop() and _is_current_task():
# Verify that sync version is not being run from coroutine
# (that would lead to event loop hiccups).
raise TypeError(
"backoff.on_predicate applied to a regular function "
"inside coroutine, this will lead to event loop "
"hiccups. Use backoff.on_predicate on coroutines in "
"asynchronous code.")
if retry is None:
retry = _sync.retry_predicate
return retry(target, wait_gen, predicate,
max_tries, max_time, jitter,
on_success_, on_backoff_, on_giveup_,
wait_gen_kwargs)
# Return a function which decorates a target with a retry loop.
return decorate | python | def on_predicate(wait_gen,
predicate=operator.not_,
max_tries=None,
max_time=None,
jitter=full_jitter,
on_success=None,
on_backoff=None,
on_giveup=None,
logger='backoff',
**wait_gen_kwargs):
"""Returns decorator for backoff and retry triggered by predicate.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
predicate: A function which when called on the return value of
the target function will trigger backoff when considered
truthily. If not specified, the default behavior is to
backoff on falsey return values.
max_tries: The maximum number of attempts to make before giving
up. In the case of failure, the result of the last attempt
will be returned. The default value of None means there
is no limit to the number of tries. If a callable is passed,
it will be evaluated at runtime and its return value used.
max_time: The maximum total amount of time to try for before
giving up. If this time expires, the result of the last
attempt will be returned. If a callable is passed, it will
be evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
logger: Name of logger or Logger object to log to. Defaults to
'backoff'.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration.
"""
def decorate(target):
# change names because python 2.x doesn't have nonlocal
logger_ = logger
if isinstance(logger_, basestring):
logger_ = logging.getLogger(logger_)
on_success_ = _config_handlers(on_success)
on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
retry = None
if sys.version_info >= (3, 5): # pragma: python=3.5
import asyncio
if asyncio.iscoroutinefunction(target):
import backoff._async
retry = backoff._async.retry_predicate
elif _is_event_loop() and _is_current_task():
# Verify that sync version is not being run from coroutine
# (that would lead to event loop hiccups).
raise TypeError(
"backoff.on_predicate applied to a regular function "
"inside coroutine, this will lead to event loop "
"hiccups. Use backoff.on_predicate on coroutines in "
"asynchronous code.")
if retry is None:
retry = _sync.retry_predicate
return retry(target, wait_gen, predicate,
max_tries, max_time, jitter,
on_success_, on_backoff_, on_giveup_,
wait_gen_kwargs)
# Return a function which decorates a target with a retry loop.
return decorate | ['def', 'on_predicate', '(', 'wait_gen', ',', 'predicate', '=', 'operator', '.', 'not_', ',', 'max_tries', '=', 'None', ',', 'max_time', '=', 'None', ',', 'jitter', '=', 'full_jitter', ',', 'on_success', '=', 'None', ',', 'on_backoff', '=', 'None', ',', 'on_giveup', '=', 'None', ',', 'logger', '=', "'backoff'", ',', '*', '*', 'wait_gen_kwargs', ')', ':', 'def', 'decorate', '(', 'target', ')', ':', "# change names because python 2.x doesn't have nonlocal", 'logger_', '=', 'logger', 'if', 'isinstance', '(', 'logger_', ',', 'basestring', ')', ':', 'logger_', '=', 'logging', '.', 'getLogger', '(', 'logger_', ')', 'on_success_', '=', '_config_handlers', '(', 'on_success', ')', 'on_backoff_', '=', '_config_handlers', '(', 'on_backoff', ',', '_log_backoff', ',', 'logger_', ')', 'on_giveup_', '=', '_config_handlers', '(', 'on_giveup', ',', '_log_giveup', ',', 'logger_', ')', 'retry', '=', 'None', 'if', 'sys', '.', 'version_info', '>=', '(', '3', ',', '5', ')', ':', '# pragma: python=3.5', 'import', 'asyncio', 'if', 'asyncio', '.', 'iscoroutinefunction', '(', 'target', ')', ':', 'import', 'backoff', '.', '_async', 'retry', '=', 'backoff', '.', '_async', '.', 'retry_predicate', 'elif', '_is_event_loop', '(', ')', 'and', '_is_current_task', '(', ')', ':', '# Verify that sync version is not being run from coroutine', '# (that would lead to event loop hiccups).', 'raise', 'TypeError', '(', '"backoff.on_predicate applied to a regular function "', '"inside coroutine, this will lead to event loop "', '"hiccups. Use backoff.on_predicate on coroutines in "', '"asynchronous code."', ')', 'if', 'retry', 'is', 'None', ':', 'retry', '=', '_sync', '.', 'retry_predicate', 'return', 'retry', '(', 'target', ',', 'wait_gen', ',', 'predicate', ',', 'max_tries', ',', 'max_time', ',', 'jitter', ',', 'on_success_', ',', 'on_backoff_', ',', 'on_giveup_', ',', 'wait_gen_kwargs', ')', '# Return a function which decorates a target with a retry loop.', 'return', 'decorate'] | Returns decorator for backoff and retry triggered by predicate.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
predicate: A function which when called on the return value of
the target function will trigger backoff when considered
truthy. If not specified, the default behavior is to
backoff on falsy return values.
max_tries: The maximum number of attempts to make before giving
up. In the case of failure, the result of the last attempt
will be returned. The default value of None means there
is no limit to the number of tries. If a callable is passed,
it will be evaluated at runtime and its return value used.
max_time: The maximum total amount of time to try for before
giving up. If this time expires, the result of the last
attempt will be returned. If a callable is passed, it will
be evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
logger: Name of logger or Logger object to log to. Defaults to
'backoff'.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration. | ['Returns', 'decorator', 'for', 'backoff', 'and', 'retry', 'triggered', 'by', 'predicate', '.'] | train | https://github.com/litl/backoff/blob/229d30adce4128f093550a1761c49594c78df4b4/backoff/_decorator.py#L20-L106 |
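A minimal usage sketch of the decorator above, assuming the `backoff` package is installed; `flaky_lookup` is a made-up stand-in for a call that usually returns a falsy result:

```python
import random

import backoff

# Retry with exponential backoff for as long as the call returns a falsy
# value (the default predicate is operator.not_), giving up after 8 tries.
@backoff.on_predicate(backoff.expo, max_tries=8, jitter=None)
def flaky_lookup():
    # Stand-in for polling a queue or job that is usually not ready yet.
    return random.choice([0, 0, 0, 42])

print(flaky_lookup())  # 42 once an attempt succeeds, or 0 if all 8 tries fail
```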
6,756 | GoogleCloudPlatform/datastore-ndb-python | ndb/metadata.py | Property.key_to_kind | def key_to_kind(cls, key):
"""Return the kind specified by a given __property__ key.
Args:
key: key whose kind name is requested.
Returns:
The kind specified by key.
"""
if key.kind() == Kind.KIND_NAME:
return key.id()
else:
return key.parent().id() | python | def key_to_kind(cls, key):
"""Return the kind specified by a given __property__ key.
Args:
key: key whose kind name is requested.
Returns:
The kind specified by key.
"""
if key.kind() == Kind.KIND_NAME:
return key.id()
else:
return key.parent().id() | ['def', 'key_to_kind', '(', 'cls', ',', 'key', ')', ':', 'if', 'key', '.', 'kind', '(', ')', '==', 'Kind', '.', 'KIND_NAME', ':', 'return', 'key', '.', 'id', '(', ')', 'else', ':', 'return', 'key', '.', 'parent', '(', ')', '.', 'id', '(', ')'] | Return the kind specified by a given __property__ key.
Args:
key: key whose kind name is requested.
Returns:
The kind specified by key. | ['Return', 'the', 'kind', 'specified', 'by', 'a', 'given', '__property__', 'key', '.'] | train | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L181-L193 |
6,757 | duniter/duniter-python-api | duniterpy/api/endpoint.py | ESCoreEndpoint.from_inline | def from_inline(cls: Type[ESCoreEndpointType], inline: str) -> ESCoreEndpointType:
"""
Return ESCoreEndpoint instance from endpoint string
:param inline: Endpoint string
:return:
"""
m = ESCoreEndpoint.re_inline.match(inline)
if m is None:
raise MalformedDocumentError(ESCoreEndpoint.API)
server = m.group(1)
port = int(m.group(2))
return cls(server, port) | python | def from_inline(cls: Type[ESCoreEndpointType], inline: str) -> ESCoreEndpointType:
"""
Return ESCoreEndpoint instance from endpoint string
:param inline: Endpoint string
:return:
"""
m = ESCoreEndpoint.re_inline.match(inline)
if m is None:
raise MalformedDocumentError(ESCoreEndpoint.API)
server = m.group(1)
port = int(m.group(2))
return cls(server, port) | ['def', 'from_inline', '(', 'cls', ':', 'Type', '[', 'ESCoreEndpointType', ']', ',', 'inline', ':', 'str', ')', '->', 'ESCoreEndpointType', ':', 'm', '=', 'ESCoreEndpoint', '.', 're_inline', '.', 'match', '(', 'inline', ')', 'if', 'm', 'is', 'None', ':', 'raise', 'MalformedDocumentError', '(', 'ESCoreEndpoint', '.', 'API', ')', 'server', '=', 'm', '.', 'group', '(', '1', ')', 'port', '=', 'int', '(', 'm', '.', 'group', '(', '2', ')', ')', 'return', 'cls', '(', 'server', ',', 'port', ')'] | Return ESCoreEndpoint instance from endpoint string
:param inline: Endpoint string
:return: | ['Return', 'ESCoreEndpoint', 'instance', 'from', 'endpoint', 'string'] | train | https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L372-L384 |
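For illustration only, a self-contained sketch of the same parse-from-inline idea using a plain regex; the `ES_CORE_API <host> <port>` format and the example hostname are assumptions based on the docstring, not duniterpy's actual pattern:

```python
import re

# Hypothetical stand-in for ESCoreEndpoint.re_inline.
RE_INLINE = re.compile(r"^ES_CORE_API (\S+) (\d+)$")

def parse_es_core_endpoint(inline):
    match = RE_INLINE.match(inline)
    if match is None:
        raise ValueError("malformed ES_CORE_API endpoint: %r" % inline)
    return match.group(1), int(match.group(2))

print(parse_es_core_endpoint("ES_CORE_API g1.data.example.org 443"))
# ('g1.data.example.org', 443)
```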
6,758 | Robpol86/libnl | libnl/attr.py | nla_put_u16 | def nla_put_u16(msg, attrtype, value):
"""Add 16 bit integer attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L588
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- numeric value to store as payload (int() or c_uint16()).
Returns:
0 on success or a negative error code.
"""
data = bytearray(value if isinstance(value, c_uint16) else c_uint16(value))
return nla_put(msg, attrtype, SIZEOF_U16, data) | python | def nla_put_u16(msg, attrtype, value):
"""Add 16 bit integer attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L588
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- numeric value to store as payload (int() or c_uint16()).
Returns:
0 on success or a negative error code.
"""
data = bytearray(value if isinstance(value, c_uint16) else c_uint16(value))
return nla_put(msg, attrtype, SIZEOF_U16, data) | ['def', 'nla_put_u16', '(', 'msg', ',', 'attrtype', ',', 'value', ')', ':', 'data', '=', 'bytearray', '(', 'value', 'if', 'isinstance', '(', 'value', ',', 'c_uint16', ')', 'else', 'c_uint16', '(', 'value', ')', ')', 'return', 'nla_put', '(', 'msg', ',', 'attrtype', ',', 'SIZEOF_U16', ',', 'data', ')'] | Add 16 bit integer attribute to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L588
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
value -- numeric value to store as payload (int() or c_uint16()).
Returns:
0 on success or a negative error code. | ['Add', '16', 'bit', 'integer', 'attribute', 'to', 'Netlink', 'message', '.'] | train | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/attr.py#L473-L487 |
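A quick way to see what the two-byte payload built by this helper looks like, using only the standard library; the exact bytes shown assume a little-endian host:

```python
from ctypes import c_uint16

# nla_put_u16 serializes the value into a 2-byte buffer before calling nla_put.
payload = bytearray(c_uint16(0x1234))
print(len(payload), payload.hex())  # 2 3412  (on a little-endian machine)
```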
6,759 | softlayer/softlayer-python | SoftLayer/CLI/helpers.py | resolve_id | def resolve_id(resolver, identifier, name='object'):
"""Resolves a single id using a resolver function.
:param resolver: function that resolves ids. Should return None or a list of ids.
:param string identifier: a string identifier used to resolve ids
:param string name: the object type, to be used in error messages
"""
try:
return int(identifier)
except ValueError:
pass # It was worth a shot
ids = resolver(identifier)
if len(ids) == 0:
raise exceptions.CLIAbort("Error: Unable to find %s '%s'" % (name, identifier))
if len(ids) > 1:
raise exceptions.CLIAbort(
"Error: Multiple %s found for '%s': %s" %
(name, identifier, ', '.join([str(_id) for _id in ids])))
return ids[0] | python | def resolve_id(resolver, identifier, name='object'):
"""Resolves a single id using a resolver function.
:param resolver: function that resolves ids. Should return None or a list of ids.
:param string identifier: a string identifier used to resolve ids
:param string name: the object type, to be used in error messages
"""
try:
return int(identifier)
except ValueError:
pass # It was worth a shot
ids = resolver(identifier)
if len(ids) == 0:
raise exceptions.CLIAbort("Error: Unable to find %s '%s'" % (name, identifier))
if len(ids) > 1:
raise exceptions.CLIAbort(
"Error: Multiple %s found for '%s': %s" %
(name, identifier, ', '.join([str(_id) for _id in ids])))
return ids[0] | ['def', 'resolve_id', '(', 'resolver', ',', 'identifier', ',', 'name', '=', "'object'", ')', ':', 'try', ':', 'return', 'int', '(', 'identifier', ')', 'except', 'ValueError', ':', 'pass', '# It was worth a shot', 'ids', '=', 'resolver', '(', 'identifier', ')', 'if', 'len', '(', 'ids', ')', '==', '0', ':', 'raise', 'exceptions', '.', 'CLIAbort', '(', '"Error: Unable to find %s \'%s\'"', '%', '(', 'name', ',', 'identifier', ')', ')', 'if', 'len', '(', 'ids', ')', '>', '1', ':', 'raise', 'exceptions', '.', 'CLIAbort', '(', '"Error: Multiple %s found for \'%s\': %s"', '%', '(', 'name', ',', 'identifier', ',', "', '", '.', 'join', '(', '[', 'str', '(', '_id', ')', 'for', '_id', 'in', 'ids', ']', ')', ')', ')', 'return', 'ids', '[', '0', ']'] | Resolves a single id using a resolver function.
:param resolver: function that resolves ids. Should return None or a list of ids.
:param string identifier: a string identifier used to resolve ids
:param string name: the object type, to be used in error messages | ['Resolves', 'a', 'single', 'id', 'using', 'a', 'resolver', 'function', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/helpers.py#L30-L53 |
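A small usage sketch of `resolve_id` with a toy resolver; the dictionary of names is invented purely for illustration, and the import assumes the SoftLayer CLI helpers are installed:

```python
from SoftLayer.CLI.helpers import resolve_id

def toy_resolver(identifier):
    # Hypothetical name lookup standing in for a real SoftLayer API call.
    known = {'web01': [1001], 'db': [2001, 2002]}
    return known.get(identifier, [])

print(resolve_id(toy_resolver, '1234', name='server'))   # 1234 (numeric ids pass through)
print(resolve_id(toy_resolver, 'web01', name='server'))  # 1001
# 'db' would abort (multiple matches); 'missing' would abort (no matches).
```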
6,760 | theislab/anndata | anndata/base.py | AnnDataFileManager._to_memory_mode | def _to_memory_mode(self):
"""Close the backing file, forget filename, *do* change to memory mode."""
self._adata.__X = self._adata.X[()]
self._file.close()
self._file = None
self._filename = None | python | def _to_memory_mode(self):
"""Close the backing file, forget filename, *do* change to memory mode."""
self._adata.__X = self._adata.X[()]
self._file.close()
self._file = None
self._filename = None | ['def', '_to_memory_mode', '(', 'self', ')', ':', 'self', '.', '_adata', '.', '__X', '=', 'self', '.', '_adata', '.', 'X', '[', '(', ')', ']', 'self', '.', '_file', '.', 'close', '(', ')', 'self', '.', '_file', '=', 'None', 'self', '.', '_filename', '=', 'None'] | Close the backing file, forget filename, *do* change to memory mode. | ['Close', 'the', 'backing', 'file', 'forget', 'filename', '*', 'do', '*', 'change', 'to', 'memory', 'mode', '.'] | train | https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/base.py#L358-L363 |
6,761 | Datary/scrapbag | scrapbag/files.py | is_remote_file_modified | def is_remote_file_modified(web_file, destination):
"""
Check if online file has been modified.
Args:
:web_file: online file to check.
:destination: path of the offline file to compare.
"""
try:
# check datetime of last modified in file.
last_mod = web_file.headers.get('last-modified')
if last_mod:
web_file_time = time.strptime(
web_file.headers.get(
'last-modified'), '%a, %d %b %Y %H:%M:%S %Z')
else:
web_file_time = time.gmtime()
web_file_size = int(web_file.headers.get('content-length', -1))
if os.path.exists(destination):
file_time = time.gmtime(os.path.getmtime(destination))
file_size = os.path.getsize(destination)
if file_time >= web_file_time and file_size == web_file_size:
return False
except Exception as ex:
msg = ('Fail checking if remote file is modified default returns TRUE'
' - {}'.format(ex))
logger.debug(msg)
return True | python | def is_remote_file_modified(web_file, destination):
"""
Check if online file has been modified.
Args:
:web_file: online file to check.
:destination: path of the offline file to compare.
"""
try:
# check datetime of last modified in file.
last_mod = web_file.headers.get('last-modified')
if last_mod:
web_file_time = time.strptime(
web_file.headers.get(
'last-modified'), '%a, %d %b %Y %H:%M:%S %Z')
else:
web_file_time = time.gmtime()
web_file_size = int(web_file.headers.get('content-length', -1))
if os.path.exists(destination):
file_time = time.gmtime(os.path.getmtime(destination))
file_size = os.path.getsize(destination)
if file_time >= web_file_time and file_size == web_file_size:
return False
except Exception as ex:
msg = ('Fail checking if remote file is modified default returns TRUE'
' - {}'.format(ex))
logger.debug(msg)
return True | ['def', 'is_remote_file_modified', '(', 'web_file', ',', 'destination', ')', ':', 'try', ':', '# check datetime of last modified in file.', 'last_mod', '=', 'web_file', '.', 'headers', '.', 'get', '(', "'last-modified'", ')', 'if', 'last_mod', ':', 'web_file_time', '=', 'time', '.', 'strptime', '(', 'web_file', '.', 'headers', '.', 'get', '(', "'last-modified'", ')', ',', "'%a, %d %b %Y %H:%M:%S %Z'", ')', 'else', ':', 'web_file_time', '=', 'time', '.', 'gmtime', '(', ')', 'web_file_size', '=', 'int', '(', 'web_file', '.', 'headers', '.', 'get', '(', "'content-length'", ',', '-', '1', ')', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'destination', ')', ':', 'file_time', '=', 'time', '.', 'gmtime', '(', 'os', '.', 'path', '.', 'getmtime', '(', 'destination', ')', ')', 'file_size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'destination', ')', 'if', 'file_time', '>=', 'web_file_time', 'and', 'file_size', '==', 'web_file_size', ':', 'return', 'False', 'except', 'Exception', 'as', 'ex', ':', 'msg', '=', '(', "'Fail checking if remote file is modified default returns TRUE'", "' - {}'", '.', 'format', '(', 'ex', ')', ')', 'logger', '.', 'debug', '(', 'msg', ')', 'return', 'True'] | Check if online file has been modified.
Args:
:web_file: online file to check.
:destination: path of the offline file to compare. | ['Check', 'if', 'online', 'file', 'has', 'been', 'modified', '.', 'Args', ':', ':', 'web_file', ':', 'online', 'file', 'to', 'check', '.', ':', 'destination', ':', 'path', 'of', 'the', 'offline', 'file', 'to', 'compare', '.'] | train | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/files.py#L112-L141 |
6,762 | quantopian/zipline | zipline/pipeline/data/dataset.py | Column.bind | def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
) | python | def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
) | ['def', 'bind', '(', 'self', ',', 'name', ')', ':', 'return', '_BoundColumnDescr', '(', 'dtype', '=', 'self', '.', 'dtype', ',', 'missing_value', '=', 'self', '.', 'missing_value', ',', 'name', '=', 'name', ',', 'doc', '=', 'self', '.', 'doc', ',', 'metadata', '=', 'self', '.', 'metadata', ',', ')'] | Bind a `Column` object to its name. | ['Bind', 'a', 'Column', 'object', 'to', 'its', 'name', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L49-L59 |
6,763 | inasafe/inasafe | safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py | StepFcExpLayerFromCanvas.selected_canvas_explayer | def selected_canvas_explayer(self):
"""Obtain the canvas exposure layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasExpLayers.selectedItems():
item = self.lstCanvasExpLayers.currentItem()
else:
return None
try:
layer_id = item.data(Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
# noinspection PyArgumentList
layer = QgsProject.instance().mapLayer(layer_id)
return layer | python | def selected_canvas_explayer(self):
"""Obtain the canvas exposure layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasExpLayers.selectedItems():
item = self.lstCanvasExpLayers.currentItem()
else:
return None
try:
layer_id = item.data(Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
# noinspection PyArgumentList
layer = QgsProject.instance().mapLayer(layer_id)
return layer | ['def', 'selected_canvas_explayer', '(', 'self', ')', ':', 'if', 'self', '.', 'lstCanvasExpLayers', '.', 'selectedItems', '(', ')', ':', 'item', '=', 'self', '.', 'lstCanvasExpLayers', '.', 'currentItem', '(', ')', 'else', ':', 'return', 'None', 'try', ':', 'layer_id', '=', 'item', '.', 'data', '(', 'Qt', '.', 'UserRole', ')', 'except', '(', 'AttributeError', ',', 'NameError', ')', ':', 'layer_id', '=', 'None', '# noinspection PyArgumentList', 'layer', '=', 'QgsProject', '.', 'instance', '(', ')', '.', 'mapLayer', '(', 'layer_id', ')', 'return', 'layer'] | Obtain the canvas exposure layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer | ['Obtain', 'the', 'canvas', 'exposure', 'layer', 'selected', 'by', 'user', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py#L73-L90 |
6,764 | thombashi/thutils | thutils/common.py | dump_dict | def dump_dict(dict_input, indent=4):
"""
Convert a dictionary variable into a string and return it.
"""
dict_work = dict(dict_input)
"""
for key, value in six.iteritems(dict_input):
if any([f(value) for f in (is_float, isDict, is_list_or_tuple)]):
dict_work[key] = value
continue
try:
dict_work[key] = str(value)
except:
dict_work[key] = str(type(value))
dict_work[key] = _convert_dump_dict(value)
"""
try:
import json
return json.dumps(dict_work, sort_keys=True, indent=indent)
except ImportError:
pass
try:
import simplejson as json
return json.dumps(dict_work, sort_keys=True, indent=indent)
except ImportError:
pass
try:
import pprint
return pprint.pformat(dict_work, indent=indent)
except ImportError:
pass
return str(dict_work) | python | def dump_dict(dict_input, indent=4):
"""
Convert a dictionary variable into a string and return it.
"""
dict_work = dict(dict_input)
"""
for key, value in six.iteritems(dict_input):
if any([f(value) for f in (is_float, isDict, is_list_or_tuple)]):
dict_work[key] = value
continue
try:
dict_work[key] = str(value)
except:
dict_work[key] = str(type(value))
dict_work[key] = _convert_dump_dict(value)
"""
try:
import json
return json.dumps(dict_work, sort_keys=True, indent=indent)
except ImportError:
pass
try:
import simplejson as json
return json.dumps(dict_work, sort_keys=True, indent=indent)
except ImportError:
pass
try:
import pprint
return pprint.pformat(dict_work, indent=indent)
except ImportError:
pass
return str(dict_work) | ['def', 'dump_dict', '(', 'dict_input', ',', 'indent', '=', '4', ')', ':', 'dict_work', '=', 'dict', '(', 'dict_input', ')', '"""\n for key, value in six.iteritems(dict_input):\n if any([f(value) for f in (is_float, isDict, is_list_or_tuple)]):\n dict_work[key] = value\n continue\n\n try:\n dict_work[key] = str(value)\n except:\n dict_work[key] = str(type(value))\n\n dict_work[key] = _convert_dump_dict(value)\n """', 'try', ':', 'import', 'json', 'return', 'json', '.', 'dumps', '(', 'dict_work', ',', 'sort_keys', '=', 'True', ',', 'indent', '=', 'indent', ')', 'except', 'ImportError', ':', 'pass', 'try', ':', 'import', 'simplejson', 'as', 'json', 'return', 'json', '.', 'dumps', '(', 'dict_work', ',', 'sort_keys', '=', 'True', ',', 'indent', '=', 'indent', ')', 'except', 'ImportError', ':', 'pass', 'try', ':', 'import', 'pprint', 'return', 'pprint', '.', 'pformat', '(', 'dict_work', ',', 'indent', '=', 'indent', ')', 'except', 'ImportError', ':', 'pass', 'return', 'str', '(', 'dict_work', ')'] | 辞書型変数を文字列に変換して返す | ['辞書型変数を文字列に変換して返す'] | train | https://github.com/thombashi/thutils/blob/9eba767cfc26b38cd66b83b99aee0c31b8b90dec/thutils/common.py#L192-L230 |
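A short usage sketch, assuming `thutils` is installed (otherwise the function body above can simply be copied); with `json` available the output is ordinary pretty-printed, key-sorted JSON:

```python
from thutils.common import dump_dict  # assumed import path, matching the record above

sample = {"b": 2, "a": {"nested": True}}
print(dump_dict(sample, indent=2))
# {
#   "a": {
#     "nested": true
#   },
#   "b": 2
# }
```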
6,765 | keon/algorithms | algorithms/sort/counting_sort.py | counting_sort | def counting_sort(arr):
"""
Counting_sort
Sorts an array which has no element greater than k.
Creates a new temp_arr, where temp_arr[i] contains the number of
elements less than or equal to i in arr.
Then places each value i into its correct position in result_arr
and returns result_arr.
Complexity: O(n)
"""
m = min(arr)
# in case there are negative elements, change the array to all positive element
different = 0
if m < 0:
# save the change, so that we can convert the array back to all positive number
different = -m
for i in range(len(arr)):
arr[i] += -m
k = max(arr)
temp_arr = [0] * (k + 1)
for i in range(0, len(arr)):
temp_arr[arr[i]] = temp_arr[arr[i]] + 1
# temp_arr[i] contains the number of times the value i appears in arr
for i in range(1, k + 1):
temp_arr[i] = temp_arr[i] + temp_arr[i - 1]
# temp_arr[i] now contains the number of elements less than or equal to i in arr
result_arr = arr.copy()
# create result_arr and put each element into its correct position
for i in range(len(arr) - 1, -1, -1):
result_arr[temp_arr[arr[i]] - 1] = arr[i] - different
temp_arr[arr[i]] = temp_arr[arr[i]] - 1
return result_arr | python | def counting_sort(arr):
"""
Counting_sort
Sorts an array which has no element greater than k.
Creates a new temp_arr, where temp_arr[i] contains the number of
elements less than or equal to i in arr.
Then places each value i into its correct position in result_arr
and returns result_arr.
Complexity: O(n)
"""
m = min(arr)
# in case there are negative elements, change the array to all positive element
different = 0
if m < 0:
# save the change, so that we can convert the array back to all positive number
different = -m
for i in range(len(arr)):
arr[i] += -m
k = max(arr)
temp_arr = [0] * (k + 1)
for i in range(0, len(arr)):
temp_arr[arr[i]] = temp_arr[arr[i]] + 1
# temp_arr[i] contains the number of times the value i appears in arr
for i in range(1, k + 1):
temp_arr[i] = temp_arr[i] + temp_arr[i - 1]
# temp_arr[i] now contains the number of elements less than or equal to i in arr
result_arr = arr.copy()
# create result_arr and put each element into its correct position
for i in range(len(arr) - 1, -1, -1):
result_arr[temp_arr[arr[i]] - 1] = arr[i] - different
temp_arr[arr[i]] = temp_arr[arr[i]] - 1
return result_arr | ['def', 'counting_sort', '(', 'arr', ')', ':', 'm', '=', 'min', '(', 'arr', ')', '# in case there are negative elements, change the array to all positive element', 'different', '=', '0', 'if', 'm', '<', '0', ':', '# save the change, so that we can convert the array back to all positive number', 'different', '=', '-', 'm', 'for', 'i', 'in', 'range', '(', 'len', '(', 'arr', ')', ')', ':', 'arr', '[', 'i', ']', '+=', '-', 'm', 'k', '=', 'max', '(', 'arr', ')', 'temp_arr', '=', '[', '0', ']', '*', '(', 'k', '+', '1', ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'arr', ')', ')', ':', 'temp_arr', '[', 'arr', '[', 'i', ']', ']', '=', 'temp_arr', '[', 'arr', '[', 'i', ']', ']', '+', '1', '# temp_array[i] contain the times the number i appear in arr', 'for', 'i', 'in', 'range', '(', '1', ',', 'k', '+', '1', ')', ':', 'temp_arr', '[', 'i', ']', '=', 'temp_arr', '[', 'i', ']', '+', 'temp_arr', '[', 'i', '-', '1', ']', '# temp_array[i] contain the number of element less than or equal i in arr', 'result_arr', '=', 'arr', '.', 'copy', '(', ')', '# creating a result_arr an put the element in a correct positon', 'for', 'i', 'in', 'range', '(', 'len', '(', 'arr', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'result_arr', '[', 'temp_arr', '[', 'arr', '[', 'i', ']', ']', '-', '1', ']', '=', 'arr', '[', 'i', ']', '-', 'different', 'temp_arr', '[', 'arr', '[', 'i', ']', ']', '=', 'temp_arr', '[', 'arr', '[', 'i', ']', ']', '-', '1', 'return', 'result_arr'] | Counting_sort
Sorts an array which has no element greater than k.
Creates a new temp_arr, where temp_arr[i] contains the number of
elements less than or equal to i in arr.
Then places each value i into its correct position in result_arr
and returns result_arr.
Complexity: 0(n) | ['Counting_sort', 'Sorting', 'a', 'array', 'which', 'has', 'no', 'element', 'greater', 'than', 'k', 'Creating', 'a', 'new', 'temp_arr', 'where', 'temp_arr', '[', 'i', ']', 'contain', 'the', 'number', 'of', 'element', 'less', 'than', 'or', 'equal', 'to', 'i', 'in', 'the', 'arr', 'Then', 'placing', 'the', 'number', 'i', 'into', 'a', 'correct', 'position', 'in', 'the', 'result_arr', 'return', 'the', 'result_arr', 'Complexity', ':', '0', '(', 'n', ')'] | train | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/counting_sort.py#L1-L36 |
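A quick check of the behaviour described above, assuming `counting_sort` as defined in the record is available in scope (for example pasted locally, or imported with `from algorithms.sort import counting_sort` if the package is installed; that import path is an assumption). Note that negative values are handled because of the initial shift by the minimum:

```python
print(counting_sort([4, -2, 7, 0, -2, 3]))  # [-2, -2, 0, 3, 4, 7]
print(counting_sort([1, 1, 1]))             # [1, 1, 1]
```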
6,766 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_restserver.py | mavlink_to_json | def mavlink_to_json(msg):
'''Translate mavlink python messages in json string'''
ret = '\"%s\": {' % msg._type
for fieldname in msg._fieldnames:
data = getattr(msg, fieldname)
ret += '\"%s\" : \"%s\", ' % (fieldname, data)
ret = ret[0:-2] + '}'
return ret | python | def mavlink_to_json(msg):
'''Translate mavlink python messages in json string'''
ret = '\"%s\": {' % msg._type
for fieldname in msg._fieldnames:
data = getattr(msg, fieldname)
ret += '\"%s\" : \"%s\", ' % (fieldname, data)
ret = ret[0:-2] + '}'
return ret | ['def', 'mavlink_to_json', '(', 'msg', ')', ':', 'ret', '=', '\'\\"%s\\": {\'', '%', 'msg', '.', '_type', 'for', 'fieldname', 'in', 'msg', '.', '_fieldnames', ':', 'data', '=', 'getattr', '(', 'msg', ',', 'fieldname', ')', 'ret', '+=', '\'\\"%s\\" : \\"%s\\", \'', '%', '(', 'fieldname', ',', 'data', ')', 'ret', '=', 'ret', '[', '0', ':', '-', '2', ']', '+', "'}'", 'return', 'ret'] | Translate mavlink python messages in json string | ['Translate', 'mavlink', 'python', 'messages', 'in', 'json', 'string'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_restserver.py#L18-L25 |
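A self-contained sketch of the kind of string the helper produces, assuming `mavlink_to_json` from the record above is in scope; `FakeMsg` is a hypothetical stand-in for a pymavlink message object, and note the result is a key/object fragment rather than a complete JSON document:

```python
class FakeMsg(object):
    """Hypothetical stand-in for a pymavlink message."""
    _type = 'HEARTBEAT'
    _fieldnames = ('type', 'autopilot')
    type = 6
    autopilot = 8

print(mavlink_to_json(FakeMsg()))
# "HEARTBEAT": {"type" : "6", "autopilot" : "8"}
```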
6,767 | DistrictDataLabs/yellowbrick | yellowbrick/cluster/icdm.py | InterclusterDistance._make_size_legend | def _make_size_legend(self):
"""
Draw a legend that shows relative sizes of the clusters at the 25th,
50th, and 75th percentile based on the current scoring metric.
"""
# Compute the size of the markers and scale them to our figure size
# NOTE: the marker size is the area of the plot, we need to compute the
# radius of the markers.
areas = self._get_cluster_sizes()
radii = np.sqrt(areas / np.pi)
scaled = np.interp(radii, (radii.min(), radii.max()), (.1, 1))
# Compute the locations of the 25th, 50th, and 75th percentile scores
indices = np.array([
percentile_index(self.scores_, p) for p in (25, 50, 75)
])
# Draw size circles annotated with the percentile score as the legend.
for idx in indices:
# TODO: should the size circle's center be hard coded like this?
center = (-0.30, 1-scaled[idx])
c = Circle(
center, scaled[idx], facecolor="none", edgecolor="#2e7193",
linewidth=1.5, linestyle="--"
)
self.lax.add_patch(c)
# Add annotation to the size circle with the value of the score
self.lax.annotate(
self.scores_[idx], (-0.30, 1-(2*scaled[idx])), xytext=(1, 1-(2*scaled[idx])),
arrowprops=dict(arrowstyle="wedge", color="#2e7193"), va='center', ha='center',
)
# Draw size legend title
self.lax.text(s="membership", x=0, y=1.2, va='center', ha='center')
# Ensure the current axes is always the main axes after modifying the
# inset axes and while drawing.
plt.sca(self.ax) | python | def _make_size_legend(self):
"""
Draw a legend that shows relative sizes of the clusters at the 25th,
50th, and 75th percentile based on the current scoring metric.
"""
# Compute the size of the markers and scale them to our figure size
# NOTE: the marker size is the area of the plot, we need to compute the
# radius of the markers.
areas = self._get_cluster_sizes()
radii = np.sqrt(areas / np.pi)
scaled = np.interp(radii, (radii.min(), radii.max()), (.1, 1))
# Compute the locations of the 25th, 50th, and 75th percentile scores
indices = np.array([
percentile_index(self.scores_, p) for p in (25, 50, 75)
])
# Draw size circles annotated with the percentile score as the legend.
for idx in indices:
# TODO: should the size circle's center be hard coded like this?
center = (-0.30, 1-scaled[idx])
c = Circle(
center, scaled[idx], facecolor="none", edgecolor="#2e7193",
linewidth=1.5, linestyle="--"
)
self.lax.add_patch(c)
# Add annotation to the size circle with the value of the score
self.lax.annotate(
self.scores_[idx], (-0.30, 1-(2*scaled[idx])), xytext=(1, 1-(2*scaled[idx])),
arrowprops=dict(arrowstyle="wedge", color="#2e7193"), va='center', ha='center',
)
# Draw size legend title
self.lax.text(s="membership", x=0, y=1.2, va='center', ha='center')
# Ensure the current axes is always the main axes after modifying the
# inset axes and while drawing.
plt.sca(self.ax) | ['def', '_make_size_legend', '(', 'self', ')', ':', '# Compute the size of the markers and scale them to our figure size', '# NOTE: the marker size is the area of the plot, we need to compute the', '# radius of the markers.', 'areas', '=', 'self', '.', '_get_cluster_sizes', '(', ')', 'radii', '=', 'np', '.', 'sqrt', '(', 'areas', '/', 'np', '.', 'pi', ')', 'scaled', '=', 'np', '.', 'interp', '(', 'radii', ',', '(', 'radii', '.', 'min', '(', ')', ',', 'radii', '.', 'max', '(', ')', ')', ',', '(', '.1', ',', '1', ')', ')', '# Compute the locations of the 25th, 50th, and 75th percentile scores', 'indices', '=', 'np', '.', 'array', '(', '[', 'percentile_index', '(', 'self', '.', 'scores_', ',', 'p', ')', 'for', 'p', 'in', '(', '25', ',', '50', ',', '75', ')', ']', ')', '# Draw size circles annotated with the percentile score as the legend.', 'for', 'idx', 'in', 'indices', ':', "# TODO: should the size circle's center be hard coded like this?", 'center', '=', '(', '-', '0.30', ',', '1', '-', 'scaled', '[', 'idx', ']', ')', 'c', '=', 'Circle', '(', 'center', ',', 'scaled', '[', 'idx', ']', ',', 'facecolor', '=', '"none"', ',', 'edgecolor', '=', '"#2e7193"', ',', 'linewidth', '=', '1.5', ',', 'linestyle', '=', '"--"', ')', 'self', '.', 'lax', '.', 'add_patch', '(', 'c', ')', '# Add annotation to the size circle with the value of the score', 'self', '.', 'lax', '.', 'annotate', '(', 'self', '.', 'scores_', '[', 'idx', ']', ',', '(', '-', '0.30', ',', '1', '-', '(', '2', '*', 'scaled', '[', 'idx', ']', ')', ')', ',', 'xytext', '=', '(', '1', ',', '1', '-', '(', '2', '*', 'scaled', '[', 'idx', ']', ')', ')', ',', 'arrowprops', '=', 'dict', '(', 'arrowstyle', '=', '"wedge"', ',', 'color', '=', '"#2e7193"', ')', ',', 'va', '=', "'center'", ',', 'ha', '=', "'center'", ',', ')', '# Draw size legend title', 'self', '.', 'lax', '.', 'text', '(', 's', '=', '"membership"', ',', 'x', '=', '0', ',', 'y', '=', '1.2', ',', 'va', '=', "'center'", ',', 'ha', '=', "'center'", ')', '# Ensure the current axes is always the main axes after modifying the', '# inset axes and while drawing.', 'plt', '.', 'sca', '(', 'self', '.', 'ax', ')'] | Draw a legend that shows relative sizes of the clusters at the 25th,
50th, and 75th percentile based on the current scoring metric. | ['Draw', 'a', 'legend', 'that', 'shows', 'relative', 'sizes', 'of', 'the', 'clusters', 'at', 'the', '25th', '50th', 'and', '75th', 'percentile', 'based', 'on', 'the', 'current', 'scoring', 'metric', '.'] | train | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/cluster/icdm.py#L342-L380 |
6,768 | PMEAL/OpenPNM | openpnm/models/phases/vapor_pressure.py | antoine | def antoine(target, A, B, C, temperature='pore.temperature'):
r"""
Uses Antoine equation [1] to estimate vapor pressure of a pure component
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
A, B, C : scalars
Antoine vapor pressure coefficients for pure compounds. Since virtually
all Antoine coefficients are reported for units of mmHg and C for
historical reasons, this method assumes these A, B and C values are for
mmHg and C, but converts all properties internally to return Pascals.
temperature : string
The dictionary key containing the phase temperature values in Kelvin
[K]. Can be either pore or throat values.
[1] Antoine, C. (1888), Vapor Pressure: a new relationship between pressure
and temperature, Comptes Rendus des Séances de l'Académie des Sciences
(in French) 107: 681–684, 778–780, 836–837
"""
T = target[temperature] - 273.15
value = (10**(A-B/(C+T)))/760*101325
return value | python | def antoine(target, A, B, C, temperature='pore.temperature'):
r"""
Uses Antoine equation [1] to estimate vapor pressure of a pure component
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
A, B, C : scalars
Antoine vapor pressure coefficients for pure compounds. Since virtually
all Antoine coefficients are reported for units of mmHg and C for
historical reasons, this method assumes these A, B and C values are for
mmHg and C, but converts all properties internally to return Pascals.
temperature : string
The dictionary key containing the phase temperature values in Kelvin
[K]. Can be either pore or throat values.
[1] Antoine, C. (1888), Vapor Pressure: a new relationship between pressure
and temperature, Comptes Rendus des Séances de l'Académie des Sciences
(in French) 107: 681–684, 778–780, 836–837
"""
T = target[temperature] - 273.15
value = (10**(A-B/(C+T)))/760*101325
return value | ['def', 'antoine', '(', 'target', ',', 'A', ',', 'B', ',', 'C', ',', 'temperature', '=', "'pore.temperature'", ')', ':', 'T', '=', 'target', '[', 'temperature', ']', '-', '273.15', 'value', '=', '(', '10', '**', '(', 'A', '-', 'B', '/', '(', 'C', '+', 'T', ')', ')', ')', '/', '760', '*', '101325', 'return', 'value'] | r"""
Uses Antoine equation [1] to estimate vapor pressure of a pure component
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
A, B, C : scalars
Antoine vapor pressure coefficients for pure compounds. Since virtually
all Antoine coefficients are reported for units of mmHg and C for
historical reasons, this method assumes these A, B and C values are for
mmHg and C, but converts all properties internally to return Pascals.
temperature : string
The dictionary key containing the phase temperature values in Kelvin
[K]. Can be either pore or throat values.
[1] Antoine, C. (1888), Vapor Pressure: a new relationship between pressure
and temperature, Comptes Rendus des Séances de l'Académie des Sciences
(in French) 107: 681–684, 778–780, 836–837 | ['r', 'Uses', 'Antoine', 'equation', '[', '1', ']', 'to', 'estimate', 'vapor', 'pressure', 'of', 'a', 'pure', 'component'] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/vapor_pressure.py#L4-L32 |
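A stand-alone sanity check of the unit handling (no OpenPNM required), using commonly cited Antoine constants for water over roughly 1 to 100 deg C; at 100 deg C the correlation should give about one atmosphere:

```python
# Same formula as above, evaluated for a single temperature.
A, B, C = 8.07131, 1730.63, 233.426   # water, constants fitted for mmHg and deg C
T = 373.15 - 273.15                   # 100 deg C
p_mmHg = 10 ** (A - B / (C + T))
p_Pa = p_mmHg / 760 * 101325
print(round(p_mmHg, 1), round(p_Pa))  # ~760 mmHg, ~101.3 kPa, i.e. ~1 atm
```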
6,769 | holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getPhysicalMinimum | def getPhysicalMinimum(self,chn=None):
"""
Returns the minimum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_min(chn)
else:
return 0
else:
physMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMin[i] = self.physical_min(i)
return physMin | python | def getPhysicalMinimum(self,chn=None):
"""
Returns the minimum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_min(chn)
else:
return 0
else:
physMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMin[i] = self.physical_min(i)
return physMin | ['def', 'getPhysicalMinimum', '(', 'self', ',', 'chn', '=', 'None', ')', ':', 'if', 'chn', 'is', 'not', 'None', ':', 'if', '0', '<=', 'chn', '<', 'self', '.', 'signals_in_file', ':', 'return', 'self', '.', 'physical_min', '(', 'chn', ')', 'else', ':', 'return', '0', 'else', ':', 'physMin', '=', 'np', '.', 'zeros', '(', 'self', '.', 'signals_in_file', ')', 'for', 'i', 'in', 'np', '.', 'arange', '(', 'self', '.', 'signals_in_file', ')', ':', 'physMin', '[', 'i', ']', '=', 'self', '.', 'physical_min', '(', 'i', ')', 'return', 'physMin'] | Returns the minimum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f | ['Returns', 'the', 'minimum', 'physical', 'value', 'of', 'signal', 'edfsignal', '.'] | train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L506-L534 |
6,770 | Azure/blobxfer | blobxfer/operations/synccopy.py | SyncCopy._update_progress_bar | def _update_progress_bar(self):
# type: (SyncCopy) -> None
"""Update progress bar
:param SyncCopy self: this
"""
blobxfer.operations.progress.update_progress_bar(
self._general_options,
'synccopy',
self._synccopy_start_time,
self._synccopy_total,
self._synccopy_sofar,
self._synccopy_bytes_total,
self._synccopy_bytes_sofar,
) | python | def _update_progress_bar(self):
# type: (SyncCopy) -> None
"""Update progress bar
:param SyncCopy self: this
"""
blobxfer.operations.progress.update_progress_bar(
self._general_options,
'synccopy',
self._synccopy_start_time,
self._synccopy_total,
self._synccopy_sofar,
self._synccopy_bytes_total,
self._synccopy_bytes_sofar,
) | ['def', '_update_progress_bar', '(', 'self', ')', ':', '# type: (SyncCopy) -> None', 'blobxfer', '.', 'operations', '.', 'progress', '.', 'update_progress_bar', '(', 'self', '.', '_general_options', ',', "'synccopy'", ',', 'self', '.', '_synccopy_start_time', ',', 'self', '.', '_synccopy_total', ',', 'self', '.', '_synccopy_sofar', ',', 'self', '.', '_synccopy_bytes_total', ',', 'self', '.', '_synccopy_bytes_sofar', ',', ')'] | Update progress bar
:param SyncCopy self: this | ['Update', 'progress', 'bar', ':', 'param', 'SyncCopy', 'self', ':', 'this'] | train | https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/synccopy.py#L138-L151 |
6,771 | reingart/pyafipws | wsctg.py | WSCTG.RechazarCTG | def RechazarCTG(self, carta_porte, ctg, motivo):
"El Destino puede rechazar el CTG a través de la siguiente operatoria"
response = self.client.rechazarCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosRechazarCTG={
'cartaPorte': carta_porte,
'ctg': ctg, 'motivoRechazo': motivo,
}))['response']
datos = response.get('datosResponse')
self.__analizar_errores(response)
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['CTG'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoOperacion = str(datos['codigoOperacion']) | python | def RechazarCTG(self, carta_porte, ctg, motivo):
"El Destino puede rechazar el CTG a través de la siguiente operatoria"
response = self.client.rechazarCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosRechazarCTG={
'cartaPorte': carta_porte,
'ctg': ctg, 'motivoRechazo': motivo,
}))['response']
datos = response.get('datosResponse')
self.__analizar_errores(response)
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['CTG'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoOperacion = str(datos['codigoOperacion']) | ['def', 'RechazarCTG', '(', 'self', ',', 'carta_porte', ',', 'ctg', ',', 'motivo', ')', ':', 'response', '=', 'self', '.', 'client', '.', 'rechazarCTG', '(', 'request', '=', 'dict', '(', 'auth', '=', '{', "'token'", ':', 'self', '.', 'Token', ',', "'sign'", ':', 'self', '.', 'Sign', ',', "'cuitRepresentado'", ':', 'self', '.', 'Cuit', ',', '}', ',', 'datosRechazarCTG', '=', '{', "'cartaPorte'", ':', 'carta_porte', ',', "'ctg'", ':', 'ctg', ',', "'motivoRechazo'", ':', 'motivo', ',', '}', ')', ')', '[', "'response'", ']', 'datos', '=', 'response', '.', 'get', '(', "'datosResponse'", ')', 'self', '.', '__analizar_errores', '(', 'response', ')', 'if', 'datos', ':', 'self', '.', 'CartaPorte', '=', 'str', '(', 'datos', '[', "'cartaPorte'", ']', ')', 'self', '.', 'NumeroCTG', '=', 'str', '(', 'datos', '[', "'CTG'", ']', ')', 'self', '.', 'FechaHora', '=', 'str', '(', 'datos', '[', "'fechaHora'", ']', ')', 'self', '.', 'CodigoOperacion', '=', 'str', '(', 'datos', '[', "'codigoOperacion'", ']', ')'] | El Destino puede rechazar el CTG a través de la siguiente operatoria | ['El', 'Destino', 'puede', 'rechazar', 'el', 'CTG', 'a', 'través', 'de', 'la', 'siguiente', 'operatoria'] | train | https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsctg.py#L248-L264 |
6,772 | wummel/linkchecker | third_party/miniboa-r42/miniboa/telnet.py | TelnetClient._note_reply_pending | def _note_reply_pending(self, option, state):
"""Record the status of requested Telnet options."""
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].reply_pending = state | python | def _note_reply_pending(self, option, state):
"""Record the status of requested Telnet options."""
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].reply_pending = state | ['def', '_note_reply_pending', '(', 'self', ',', 'option', ',', 'state', ')', ':', 'if', 'not', 'self', '.', 'telnet_opt_dict', '.', 'has_key', '(', 'option', ')', ':', 'self', '.', 'telnet_opt_dict', '[', 'option', ']', '=', 'TelnetOption', '(', ')', 'self', '.', 'telnet_opt_dict', '[', 'option', ']', '.', 'reply_pending', '=', 'state'] | Record the status of requested Telnet options. | ['Record', 'the', 'status', 'of', 'requested', 'Telnet', 'options', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/miniboa/telnet.py#L714-L718 |
6,773 | SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite._initialize | def _initialize(self, con):
"""Set up tables in SQL"""
if self.initialized:
return
SQLite3Database()._initialize(con) # ASE db initialization
cur = con.execute(
'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
if cur.fetchone()[0] == 0: # no reaction table
for init_command in init_commands:
con.execute(init_command) # Create tables
con.commit()
self.initialized = True | python | def _initialize(self, con):
"""Set up tables in SQL"""
if self.initialized:
return
SQLite3Database()._initialize(con) # ASE db initialization
cur = con.execute(
'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
if cur.fetchone()[0] == 0: # no reaction table
for init_command in init_commands:
con.execute(init_command) # Create tables
con.commit()
self.initialized = True | ['def', '_initialize', '(', 'self', ',', 'con', ')', ':', 'if', 'self', '.', 'initialized', ':', 'return', 'SQLite3Database', '(', ')', '.', '_initialize', '(', 'con', ')', '# ASE db initialization', 'cur', '=', 'con', '.', 'execute', '(', '\'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"\'', ')', 'if', 'cur', '.', 'fetchone', '(', ')', '[', '0', ']', '==', '0', ':', '# no reaction table', 'for', 'init_command', 'in', 'init_commands', ':', 'con', '.', 'execute', '(', 'init_command', ')', '# Create tables', 'con', '.', 'commit', '(', ')', 'self', '.', 'initialized', '=', 'True'] | Set up tables in SQL | ['Set', 'up', 'tables', 'in', 'SQL'] | train | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L118-L133 |
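The check-then-create pattern used above can be reproduced with the standard library alone; the single-column schema below is a deliberately simplified placeholder, not CatHub's real `init_commands`:

```python
import sqlite3

con = sqlite3.connect(":memory:")
cur = con.execute('SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
if cur.fetchone()[0] == 0:  # the "reaction" table does not exist yet
    con.execute("CREATE TABLE reaction (id INTEGER PRIMARY KEY, energy REAL)")
    con.commit()
print(con.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
# [('reaction',)]
```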
6,774 | vpelletier/python-functionfs | examples/usbcat/device.py | USBCat.onEnable | def onEnable(self):
"""
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
"""
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True | python | def onEnable(self):
"""
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
"""
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True | ['def', 'onEnable', '(', 'self', ')', ':', 'trace', '(', "'onEnable'", ')', 'self', '.', '_disable', '(', ')', 'self', '.', '_aio_context', '.', 'submit', '(', 'self', '.', '_aio_recv_block_list', ')', 'self', '.', '_real_onCanSend', '(', ')', 'self', '.', '_enabled', '=', 'True'] | The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations. | ['The', 'configuration', 'containing', 'this', 'function', 'has', 'been', 'enabled', 'by', 'host', '.', 'Endpoints', 'become', 'working', 'files', 'so', 'submit', 'some', 'read', 'operations', '.'] | train | https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/device.py#L121-L130 |
6,775 | fjwCode/cerium | cerium/androiddriver.py | BaseAndroidDriver.find_element_by_name | def find_element_by_name(self, name, update=False) -> Elements:
'''Finds an element by name.
Args:
name: The name of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_name('foo')
'''
return self.find_element(by=By.NAME, value=name, update=update) | python | def find_element_by_name(self, name, update=False) -> Elements:
'''Finds an element by name.
Args:
name: The name of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_name('foo')
'''
return self.find_element(by=By.NAME, value=name, update=update) | ['def', 'find_element_by_name', '(', 'self', ',', 'name', ',', 'update', '=', 'False', ')', '->', 'Elements', ':', 'return', 'self', '.', 'find_element', '(', 'by', '=', 'By', '.', 'NAME', ',', 'value', '=', 'name', ',', 'update', '=', 'update', ')'] | Finds an element by name.
Args:
name: The name of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_name('foo') | ['Finds', 'an', 'element', 'by', 'name', '.'] | train | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L649-L665 |
6,776 | ejeschke/ginga | ginga/Bindings.py | ImageViewBindings.ms_panset | def ms_panset(self, viewer, event, data_x, data_y,
msg=True):
"""An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window.
"""
if self.canpan and (event.state == 'down'):
self._panset(viewer, data_x, data_y, msg=msg)
return True | python | def ms_panset(self, viewer, event, data_x, data_y,
msg=True):
"""An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window.
"""
if self.canpan and (event.state == 'down'):
self._panset(viewer, data_x, data_y, msg=msg)
return True | ['def', 'ms_panset', '(', 'self', ',', 'viewer', ',', 'event', ',', 'data_x', ',', 'data_y', ',', 'msg', '=', 'True', ')', ':', 'if', 'self', '.', 'canpan', 'and', '(', 'event', '.', 'state', '==', "'down'", ')', ':', 'self', '.', '_panset', '(', 'viewer', ',', 'data_x', ',', 'data_y', ',', 'msg', '=', 'msg', ')', 'return', 'True'] | An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window. | ['An', 'interactive', 'way', 'to', 'set', 'the', 'pan', 'position', '.', 'The', 'location', '(', 'data_x', 'data_y', ')', 'will', 'be', 'centered', 'in', 'the', 'window', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1840-L1847 |
6,777 | mapbox/mapboxgl-jupyter | mapboxgl/viz.py | CircleViz.add_unique_template_variables | def add_unique_template_variables(self, options):
"""Update map template variables specific to circle visual"""
options.update(dict(
geojson_data=json.dumps(self.data, ensure_ascii=False),
colorProperty=self.color_property,
colorType=self.color_function_type,
colorStops=self.color_stops,
strokeWidth=self.stroke_width,
strokeColor=self.stroke_color,
radius=self.radius,
defaultColor=self.color_default,
highlightColor=self.highlight_color
))
if self.vector_source:
options.update(vectorColorStops=self.generate_vector_color_map()) | python | def add_unique_template_variables(self, options):
"""Update map template variables specific to circle visual"""
options.update(dict(
geojson_data=json.dumps(self.data, ensure_ascii=False),
colorProperty=self.color_property,
colorType=self.color_function_type,
colorStops=self.color_stops,
strokeWidth=self.stroke_width,
strokeColor=self.stroke_color,
radius=self.radius,
defaultColor=self.color_default,
highlightColor=self.highlight_color
))
if self.vector_source:
options.update(vectorColorStops=self.generate_vector_color_map()) | ['def', 'add_unique_template_variables', '(', 'self', ',', 'options', ')', ':', 'options', '.', 'update', '(', 'dict', '(', 'geojson_data', '=', 'json', '.', 'dumps', '(', 'self', '.', 'data', ',', 'ensure_ascii', '=', 'False', ')', ',', 'colorProperty', '=', 'self', '.', 'color_property', ',', 'colorType', '=', 'self', '.', 'color_function_type', ',', 'colorStops', '=', 'self', '.', 'color_stops', ',', 'strokeWidth', '=', 'self', '.', 'stroke_width', ',', 'strokeColor', '=', 'self', '.', 'stroke_color', ',', 'radius', '=', 'self', '.', 'radius', ',', 'defaultColor', '=', 'self', '.', 'color_default', ',', 'highlightColor', '=', 'self', '.', 'highlight_color', ')', ')', 'if', 'self', '.', 'vector_source', ':', 'options', '.', 'update', '(', 'vectorColorStops', '=', 'self', '.', 'generate_vector_color_map', '(', ')', ')'] | Update map template variables specific to circle visual | ['Update', 'map', 'template', 'variables', 'specific', 'to', 'circle', 'visual'] | train | https://github.com/mapbox/mapboxgl-jupyter/blob/f6e403c13eaa910e70659c7d179e8e32ce95ae34/mapboxgl/viz.py#L403-L418 |
6,778 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _dataset_line | def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) | python | def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) | ['def', '_dataset_line', '(', 'args', ')', ':', 'if', 'args', '[', "'command'", ']', '==', "'list'", ':', 'filter_', '=', 'args', '[', "'filter'", ']', 'if', 'args', '[', "'filter'", ']', 'else', "'*'", 'context', '=', 'google', '.', 'datalab', '.', 'Context', '.', 'default', '(', ')', 'if', 'args', '[', "'project'", ']', ':', 'context', '=', 'google', '.', 'datalab', '.', 'Context', '(', 'args', '[', "'project'", ']', ',', 'context', '.', 'credentials', ')', 'return', '_render_list', '(', '[', 'str', '(', 'dataset', ')', 'for', 'dataset', 'in', 'bigquery', '.', 'Datasets', '(', 'context', ')', 'if', 'fnmatch', '.', 'fnmatch', '(', 'str', '(', 'dataset', ')', ',', 'filter_', ')', ']', ')', 'elif', 'args', '[', "'command'", ']', '==', "'create'", ':', 'try', ':', 'bigquery', '.', 'Dataset', '(', 'args', '[', "'name'", ']', ')', '.', 'create', '(', 'friendly_name', '=', 'args', '[', "'friendly'", ']', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', "'Failed to create dataset %s: %s'", '%', '(', 'args', '[', "'name'", ']', ',', 'e', ')', ')', 'elif', 'args', '[', "'command'", ']', '==', "'delete'", ':', 'try', ':', 'bigquery', '.', 'Dataset', '(', 'args', '[', "'name'", ']', ')', '.', 'delete', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', "'Failed to delete dataset %s: %s'", '%', '(', 'args', '[', "'name'", ']', ',', 'e', ')', ')'] | Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'. | ['Implements', 'the', 'BigQuery', 'dataset', 'magic', 'subcommand', 'used', 'to', 'operate', 'on', 'datasets'] | train | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L650-L680 |
6,779 | ArchiveTeam/wpull | wpull/pipeline/session.py | ItemSession.set_status | def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None):
'''Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
'''
url = self.url_record.url
assert not self._try_count_incremented, (url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', url, status))
url_result = URLResult()
url_result.filename = filename
self.app_session.factory['URLTable'].check_in(
url,
status,
increment_try_count=increment_try_count,
url_result=url_result,
)
self._processed = True | python | def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None):
'''Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
'''
url = self.url_record.url
assert not self._try_count_incremented, (url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', url, status))
url_result = URLResult()
url_result.filename = filename
self.app_session.factory['URLTable'].check_in(
url,
status,
increment_try_count=increment_try_count,
url_result=url_result,
)
self._processed = True | ['def', 'set_status', '(', 'self', ',', 'status', ':', 'Status', ',', 'increment_try_count', ':', 'bool', '=', 'True', ',', 'filename', ':', 'str', '=', 'None', ')', ':', 'url', '=', 'self', '.', 'url_record', '.', 'url', 'assert', 'not', 'self', '.', '_try_count_incremented', ',', '(', 'url', ',', 'status', ')', 'if', 'increment_try_count', ':', 'self', '.', '_try_count_incremented', '=', 'True', '_logger', '.', 'debug', '(', '__', '(', "'Marking URL {0} status {1}.'", ',', 'url', ',', 'status', ')', ')', 'url_result', '=', 'URLResult', '(', ')', 'url_result', '.', 'filename', '=', 'filename', 'self', '.', 'app_session', '.', 'factory', '[', "'URLTable'", ']', '.', 'check_in', '(', 'url', ',', 'status', ',', 'increment_try_count', '=', 'increment_try_count', ',', 'url_result', '=', 'url_result', ',', ')', 'self', '.', '_processed', '=', 'True'] | Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value | ['Mark', 'the', 'item', 'with', 'the', 'given', 'status', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/pipeline/session.py#L65-L92 |
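A minimal sketch of how a processing hook might call the `set_status` method recorded above; the `item_session` argument and the `Status` import path are assumptions inferred from the record, not a verified wpull snippet.
.. code-block:: python

    from wpull.pipeline.item import Status  # assumed module; Status is referenced in the signature above

    def mark_saved(item_session, filename):
        # Record the URL as successfully fetched and remember where the body was written.
        item_session.set_status(Status.done, filename=filename)

    def mark_failed(item_session):
        # Count the attempt and flag the URL as errored so it can be retried later.
        item_session.set_status(Status.error)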
6,780 | iDigBio/idigbio-python-client | idigbio/pandas_client.py | iDbApiPandas.search_records | def search_records(self, **kwargs):
"""
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
return self.__search_base(apifn=self.__api.search_records, **kwargs) | python | def search_records(self, **kwargs):
"""
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
return self.__search_base(apifn=self.__api.search_records, **kwargs) | ['def', 'search_records', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '__search_base', '(', 'apifn', '=', 'self', '.', '__api', '.', 'search_records', ',', '*', '*', 'kwargs', ')'] | rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error. | ['rq', 'Search', 'Query', 'in', 'iDigBio', 'Query', 'Format', 'using', 'Record', 'Query', 'Fields', 'sort', 'field', 'to', 'sort', 'on', 'pick', 'from', 'Record', 'Query', 'Fields', 'fields', 'a', 'list', 'of', 'fields', 'to', 'return', 'specified', 'using', 'the', 'fieldName', 'parameter', 'from', 'Fields', 'with', 'type', 'records', 'fields_exclude', 'a', 'list', 'of', 'fields', 'to', 'exclude', 'specified', 'using', 'the', 'fieldName', 'parameter', 'from', 'Fields', 'with', 'type', 'records', 'limit', 'max', 'results', 'offset', 'skip', 'results'] | train | https://github.com/iDigBio/idigbio-python-client/blob/e896075b9fed297fc420caf303b3bb5a2298d969/idigbio/pandas_client.py#L51-L63 |
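A small usage sketch for the pandas client method above; the `idigbio.pandas()` constructor and the example query are assumptions drawn from the iDigBio client's documented query format.
.. code-block:: python

    import idigbio  # assumed top-level package for this client

    api = idigbio.pandas()  # pandas-flavoured API wrapper
    # Fetch up to 100 records for the genus Acer as a DataFrame (the method returns None on error).
    df = api.search_records(rq={"genus": "acer"}, limit=100)
    if df is not None:
        print(df.shape)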
6,781 | major/supernova | supernova/supernova.py | execute_executable | def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process | python | def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process | ['def', 'execute_executable', '(', 'nova_args', ',', 'env_vars', ')', ':', 'process', '=', 'subprocess', '.', 'Popen', '(', 'nova_args', ',', 'stdout', '=', 'sys', '.', 'stdout', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ',', 'env', '=', 'env_vars', ')', 'process', '.', 'wait', '(', ')', 'return', 'process'] | Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly. | ['Executes', 'the', 'executable', 'given', 'by', 'the', 'user', '.'] | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L32-L44 |
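A short sketch of calling the `execute_executable` function shown above with a copied environment; the `nova list` command and the credential variables are illustrative assumptions.
.. code-block:: python

    import os

    env = os.environ.copy()
    env['OS_USERNAME'] = 'demo'            # hypothetical credential for the target cloud
    env['OS_PASSWORD'] = 'not-a-real-pw'   # hypothetical

    # Runs the client, streams its stdout to the terminal, and returns the finished process.
    process = execute_executable(['nova', 'list'], env)
    print("exit code:", process.returncode)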
6,782 | r0x0r/pywebview | webview/__init__.py | create_window | def create_window(title, url=None, js_api=None, width=800, height=600,
resizable=True, fullscreen=False, min_size=(200, 100), strings={}, confirm_quit=False,
background_color='#FFFFFF', text_select=False, frameless=False, debug=False):
"""
Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
:param height:window height. Default is 600px
:param resizable True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param strings: a dictionary with localized strings
:param confirm_quit: Display a quit confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:param frameless: Whether the window should have a frame.
:return: The uid of the created window.
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
# Check if starting up from main thread; if not, wait; finally raise exception
if current_thread().name == 'MainThread':
uid = 'master'
if not _initialized:
_initialize_imports()
localization.update(strings)
else:
uid = 'child_' + uuid4().hex[:8]
if not _webview_ready.wait(5):
raise Exception('Call create_window from the main thread first')
_webview_ready.clear() # Make API calls wait while the new window is created
gui.create_window(uid, make_unicode(title), transform_url(url),
width, height, resizable, fullscreen, min_size, confirm_quit,
background_color, debug, js_api, text_select, frameless, _webview_ready)
if uid == 'master':
_webview_ready.clear()
else:
return uid | python | def create_window(title, url=None, js_api=None, width=800, height=600,
resizable=True, fullscreen=False, min_size=(200, 100), strings={}, confirm_quit=False,
background_color='#FFFFFF', text_select=False, frameless=False, debug=False):
"""
Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
:param height:window height. Default is 600px
:param resizable True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param strings: a dictionary with localized strings
:param confirm_quit: Display a quit confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:param frameless: Whether the window should have a frame.
:return: The uid of the created window.
"""
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
# Check if starting up from main thread; if not, wait; finally raise exception
if current_thread().name == 'MainThread':
uid = 'master'
if not _initialized:
_initialize_imports()
localization.update(strings)
else:
uid = 'child_' + uuid4().hex[:8]
if not _webview_ready.wait(5):
raise Exception('Call create_window from the main thread first')
_webview_ready.clear() # Make API calls wait while the new window is created
gui.create_window(uid, make_unicode(title), transform_url(url),
width, height, resizable, fullscreen, min_size, confirm_quit,
background_color, debug, js_api, text_select, frameless, _webview_ready)
if uid == 'master':
_webview_ready.clear()
else:
return uid | ['def', 'create_window', '(', 'title', ',', 'url', '=', 'None', ',', 'js_api', '=', 'None', ',', 'width', '=', '800', ',', 'height', '=', '600', ',', 'resizable', '=', 'True', ',', 'fullscreen', '=', 'False', ',', 'min_size', '=', '(', '200', ',', '100', ')', ',', 'strings', '=', '{', '}', ',', 'confirm_quit', '=', 'False', ',', 'background_color', '=', "'#FFFFFF'", ',', 'text_select', '=', 'False', ',', 'frameless', '=', 'False', ',', 'debug', '=', 'False', ')', ':', 'valid_color', '=', "r'^#(?:[0-9a-fA-F]{3}){1,2}$'", 'if', 'not', 're', '.', 'match', '(', 'valid_color', ',', 'background_color', ')', ':', 'raise', 'ValueError', '(', "'{0} is not a valid hex triplet color'", '.', 'format', '(', 'background_color', ')', ')', '# Check if starting up from main thread; if not, wait; finally raise exception', 'if', 'current_thread', '(', ')', '.', 'name', '==', "'MainThread'", ':', 'uid', '=', "'master'", 'if', 'not', '_initialized', ':', '_initialize_imports', '(', ')', 'localization', '.', 'update', '(', 'strings', ')', 'else', ':', 'uid', '=', "'child_'", '+', 'uuid4', '(', ')', '.', 'hex', '[', ':', '8', ']', 'if', 'not', '_webview_ready', '.', 'wait', '(', '5', ')', ':', 'raise', 'Exception', '(', "'Call create_window from the main thread first'", ')', '_webview_ready', '.', 'clear', '(', ')', '# Make API calls wait while the new window is created', 'gui', '.', 'create_window', '(', 'uid', ',', 'make_unicode', '(', 'title', ')', ',', 'transform_url', '(', 'url', ')', ',', 'width', ',', 'height', ',', 'resizable', ',', 'fullscreen', ',', 'min_size', ',', 'confirm_quit', ',', 'background_color', ',', 'debug', ',', 'js_api', ',', 'text_select', ',', 'frameless', ',', '_webview_ready', ')', 'if', 'uid', '==', "'master'", ':', '_webview_ready', '.', 'clear', '(', ')', 'else', ':', 'return', 'uid'] | Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
program logic must be executed in a separate thread.
:param title: Window title
:param url: URL to load
:param width: window width. Default is 800px
:param height:window height. Default is 600px
:param resizable True if window can be resized, False otherwise. Default is True
:param fullscreen: True if start in fullscreen mode. Default is False
:param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
:param strings: a dictionary with localized strings
:param confirm_quit: Display a quit confirmation dialog. Default is False
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:param text_select: Allow text selection on page. Default is False.
:param frameless: Whether the window should have a frame.
:return: The uid of the created window. | ['Create', 'a', 'web', 'view', 'window', 'using', 'a', 'native', 'GUI', '.', 'The', 'execution', 'blocks', 'after', 'this', 'function', 'is', 'invoked', 'so', 'other', 'program', 'logic', 'must', 'be', 'executed', 'in', 'a', 'separate', 'thread', '.', ':', 'param', 'title', ':', 'Window', 'title', ':', 'param', 'url', ':', 'URL', 'to', 'load', ':', 'param', 'width', ':', 'window', 'width', '.', 'Default', 'is', '800px', ':', 'param', 'height', ':', 'window', 'height', '.', 'Default', 'is', '600px', ':', 'param', 'resizable', 'True', 'if', 'window', 'can', 'be', 'resized', 'False', 'otherwise', '.', 'Default', 'is', 'True', ':', 'param', 'fullscreen', ':', 'True', 'if', 'start', 'in', 'fullscreen', 'mode', '.', 'Default', 'is', 'False', ':', 'param', 'min_size', ':', 'a', '(', 'width', 'height', ')', 'tuple', 'that', 'specifies', 'a', 'minimum', 'window', 'size', '.', 'Default', 'is', '200x100', ':', 'param', 'strings', ':', 'a', 'dictionary', 'with', 'localized', 'strings', ':', 'param', 'confirm_quit', ':', 'Display', 'a', 'quit', 'confirmation', 'dialog', '.', 'Default', 'is', 'False', ':', 'param', 'background_color', ':', 'Background', 'color', 'as', 'a', 'hex', 'string', 'that', 'is', 'displayed', 'before', 'the', 'content', 'of', 'webview', 'is', 'loaded', '.', 'Default', 'is', 'white', '.', ':', 'param', 'text_select', ':', 'Allow', 'text', 'selection', 'on', 'page', '.', 'Default', 'is', 'False', '.', ':', 'param', 'frameless', ':', 'Whether', 'the', 'window', 'should', 'have', 'a', 'frame', '.', ':', 'return', ':', 'The', 'uid', 'of', 'the', 'created', 'window', '.'] | train | https://github.com/r0x0r/pywebview/blob/fc44d84656e88f83ca496abb50ee75e95540996e/webview/__init__.py#L197-L242 |
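A minimal sketch against the blocking API documented above; it assumes this older pywebview interface, where `create_window` called from the main thread does not return until the window is closed, so other logic runs in a separate thread.
.. code-block:: python

    import threading
    import webview  # assumed to expose the create_window shown above

    def background_logic():
        # Program logic must live in its own thread because create_window blocks.
        print("window is starting up")

    threading.Thread(target=background_logic).start()
    webview.create_window('Demo', url='https://example.org',
                          width=1024, height=768, resizable=True)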
6,783 | codeinn/vcs | vcs/backends/hg/changeset.py | MercurialChangeset.parents | def parents(self):
"""
Returns list of parents changesets.
"""
return [self.repository.get_changeset(parent.rev())
for parent in self._ctx.parents() if parent.rev() >= 0] | python | def parents(self):
"""
Returns list of parents changesets.
"""
return [self.repository.get_changeset(parent.rev())
for parent in self._ctx.parents() if parent.rev() >= 0] | ['def', 'parents', '(', 'self', ')', ':', 'return', '[', 'self', '.', 'repository', '.', 'get_changeset', '(', 'parent', '.', 'rev', '(', ')', ')', 'for', 'parent', 'in', 'self', '.', '_ctx', '.', 'parents', '(', ')', 'if', 'parent', '.', 'rev', '(', ')', '>=', '0', ']'] | Returns list of parents changesets. | ['Returns', 'list', 'of', 'parents', 'changesets', '.'] | train | https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/hg/changeset.py#L97-L102 |
6,784 | lk-geimfari/mimesis | mimesis/random.py | Random.urandom | def urandom(*args: Any, **kwargs: Any) -> bytes:
"""Return a bytes object containing random bytes.
:return: Bytes.
"""
return os.urandom(*args, **kwargs) | python | def urandom(*args: Any, **kwargs: Any) -> bytes:
"""Return a bytes object containing random bytes.
:return: Bytes.
"""
return os.urandom(*args, **kwargs) | ['def', 'urandom', '(', '*', 'args', ':', 'Any', ',', '*', '*', 'kwargs', ':', 'Any', ')', '->', 'bytes', ':', 'return', 'os', '.', 'urandom', '(', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Return a bytes object containing random bytes.
:return: Bytes. | ['Return', 'a', 'bytes', 'object', 'containing', 'random', 'bytes', '.'] | train | https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/random.py#L46-L51 |
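A tiny sketch using the random helper above; the import path is an assumption based on the record's module location.
.. code-block:: python

    from mimesis.random import Random  # assumed import path from the record

    rnd = Random()
    token = rnd.urandom(16)   # 16 random bytes sourced from os.urandom
    print(token.hex())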
6,785 | MillionIntegrals/vel | vel/rl/algo/dqn.py | DeepQLearning.post_optimization_step | def post_optimization_step(self, batch_info, device, model, rollout):
""" Steps to take after optimization has been done"""
if batch_info.aggregate_batch_number % self.target_update_frequency == 0:
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval() | python | def post_optimization_step(self, batch_info, device, model, rollout):
""" Steps to take after optimization has been done"""
if batch_info.aggregate_batch_number % self.target_update_frequency == 0:
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval() | ['def', 'post_optimization_step', '(', 'self', ',', 'batch_info', ',', 'device', ',', 'model', ',', 'rollout', ')', ':', 'if', 'batch_info', '.', 'aggregate_batch_number', '%', 'self', '.', 'target_update_frequency', '==', '0', ':', 'self', '.', 'target_model', '.', 'load_state_dict', '(', 'model', '.', 'state_dict', '(', ')', ')', 'self', '.', 'target_model', '.', 'eval', '(', ')'] | Steps to take after optimization has been done | ['Steps', 'to', 'take', 'after', 'optimization', 'has', 'been', 'done'] | train | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/algo/dqn.py#L77-L81 |
6,786 | CybOXProject/mixbox | mixbox/datautils.py | needkwargs | def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator | python | def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator | ['def', 'needkwargs', '(', '*', 'argnames', ')', ':', 'required', '=', 'set', '(', 'argnames', ')', 'def', 'decorator', '(', 'func', ')', ':', 'def', 'inner', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'missing', '=', 'required', '-', 'set', '(', 'kwargs', ')', 'if', 'missing', ':', 'err', '=', '"%s kwargs are missing."', '%', 'list', '(', 'missing', ')', 'raise', 'ValueError', '(', 'err', ')', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'inner', 'return', 'decorator'] | Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call. | ['Function', 'decorator', 'which', 'checks', 'that', 'the', 'decorated', 'function', 'is', 'called', 'with', 'a', 'set', 'of', 'required', 'kwargs', '.'] | train | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L83-L104 |
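A self-contained sketch of the decorator above; the decorated function is purely illustrative.
.. code-block:: python

    @needkwargs("host", "port")
    def connect(**kwargs):
        return "{host}:{port}".format(**kwargs)

    print(connect(host="localhost", port=8080))  # ok: both required kwargs are present

    try:
        connect(host="localhost")                # 'port' is missing
    except ValueError as err:
        print(err)                               # reports the missing kwarg names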
6,787 | pkkid/python-plexapi | plexapi/library.py | PhotoSection.sync | def sync(self, resolution, limit=None, **kwargs):
""" Add current Music library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the
module :mod:`plexapi.sync`.
limit (int): maximum count of tracks to sync, unlimited if `None`.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import PHOTO_QUALITY_HIGH
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Photos')
section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc',
title='Fresh photos')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createPhoto(resolution)
kwargs['policy'] = Policy.create(limit)
return super(PhotoSection, self).sync(**kwargs) | python | def sync(self, resolution, limit=None, **kwargs):
""" Add current Music library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the
module :mod:`plexapi.sync`.
limit (int): maximum count of tracks to sync, unlimited if `None`.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import PHOTO_QUALITY_HIGH
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Photos')
section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc',
title='Fresh photos')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createPhoto(resolution)
kwargs['policy'] = Policy.create(limit)
return super(PhotoSection, self).sync(**kwargs) | ['def', 'sync', '(', 'self', ',', 'resolution', ',', 'limit', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'from', 'plexapi', '.', 'sync', 'import', 'Policy', ',', 'MediaSettings', 'kwargs', '[', "'mediaSettings'", ']', '=', 'MediaSettings', '.', 'createPhoto', '(', 'resolution', ')', 'kwargs', '[', "'policy'", ']', '=', 'Policy', '.', 'create', '(', 'limit', ')', 'return', 'super', '(', 'PhotoSection', ',', 'self', ')', '.', 'sync', '(', '*', '*', 'kwargs', ')'] | Add current Music library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the
module :mod:`plexapi.sync`.
limit (int): maximum count of tracks to sync, unlimited if `None`.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import PHOTO_QUALITY_HIGH
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Photos')
section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc',
title='Fresh photos') | ['Add', 'current', 'Music', 'library', 'section', 'as', 'sync', 'item', 'for', 'specified', 'device', '.', 'See', 'description', 'of', ':', 'func', ':', 'plexapi', '.', 'library', '.', 'LibrarySection', '.', 'search', '()', 'for', 'details', 'about', 'filtering', '/', 'sorting', 'and', ':', 'func', ':', 'plexapi', '.', 'library', '.', 'LibrarySection', '.', 'sync', '()', 'for', 'details', 'on', 'syncing', 'libraries', 'and', 'possible', 'exceptions', '.'] | train | https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/library.py#L876-L908 |
6,788 | saltstack/salt | salt/states/panos.py | download_software | def download_software(name, version=None, synch=False, check=False):
'''
Ensures that a software version is downloaded.
name: The name of the module function to execute.
version(str): The software version to check. If this version is not already downloaded, it will attempt to download
the file from Palo Alto.
synch(bool): If true, after downloading the file it will be synched to its peer.
check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
Alto.
SLS Example:
.. code-block:: yaml
panos/version8.0.0:
panos.download_software:
- version: 8.0.0
- synch: False
- check: True
'''
ret = _default_ret(name)
if check is True:
__salt__['panos.check_software']()
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions \
or 'versions' not in versions['sw-updates'] \
or 'entry' not in versions['sw-updates']['versions']:
ret.update({
'comment': 'Software version is not found in the local software list.',
'result': False
})
return ret
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == "yes":
ret.update({
'comment': 'Software version is already downloaded.',
'result': True
})
return ret
ret.update({
'changes': __salt__['panos.download_software_version'](version=version, synch=synch)
})
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions \
or 'versions' not in versions['sw-updates'] \
or 'entry' not in versions['sw-updates']['versions']:
ret.update({
'result': False
})
return ret
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == "yes":
ret.update({
'result': True
})
return ret
return ret | python | def download_software(name, version=None, synch=False, check=False):
'''
Ensures that a software version is downloaded.
name: The name of the module function to execute.
version(str): The software version to check. If this version is not already downloaded, it will attempt to download
the file from Palo Alto.
synch(bool): If true, after downloading the file it will be synched to its peer.
check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
Alto.
SLS Example:
.. code-block:: yaml
panos/version8.0.0:
panos.download_software:
- version: 8.0.0
- synch: False
- check: True
'''
ret = _default_ret(name)
if check is True:
__salt__['panos.check_software']()
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions \
or 'versions' not in versions['sw-updates'] \
or 'entry' not in versions['sw-updates']['versions']:
ret.update({
'comment': 'Software version is not found in the local software list.',
'result': False
})
return ret
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == "yes":
ret.update({
'comment': 'Software version is already downloaded.',
'result': True
})
return ret
ret.update({
'changes': __salt__['panos.download_software_version'](version=version, synch=synch)
})
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions \
or 'versions' not in versions['sw-updates'] \
or 'entry' not in versions['sw-updates']['versions']:
ret.update({
'result': False
})
return ret
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == "yes":
ret.update({
'result': True
})
return ret
return ret | ['def', 'download_software', '(', 'name', ',', 'version', '=', 'None', ',', 'synch', '=', 'False', ',', 'check', '=', 'False', ')', ':', 'ret', '=', '_default_ret', '(', 'name', ')', 'if', 'check', 'is', 'True', ':', '__salt__', '[', "'panos.check_software'", ']', '(', ')', 'versions', '=', '__salt__', '[', "'panos.get_software_info'", ']', '(', ')', '[', "'result'", ']', 'if', "'sw-updates'", 'not', 'in', 'versions', 'or', "'versions'", 'not', 'in', 'versions', '[', "'sw-updates'", ']', 'or', "'entry'", 'not', 'in', 'versions', '[', "'sw-updates'", ']', '[', "'versions'", ']', ':', 'ret', '.', 'update', '(', '{', "'comment'", ':', "'Software version is not found in the local software list.'", ',', "'result'", ':', 'False', '}', ')', 'return', 'ret', 'for', 'entry', 'in', 'versions', '[', "'sw-updates'", ']', '[', "'versions'", ']', '[', "'entry'", ']', ':', 'if', 'entry', '[', "'version'", ']', '==', 'version', 'and', 'entry', '[', "'downloaded'", ']', '==', '"yes"', ':', 'ret', '.', 'update', '(', '{', "'comment'", ':', "'Software version is already downloaded.'", ',', "'result'", ':', 'True', '}', ')', 'return', 'ret', 'ret', '.', 'update', '(', '{', "'changes'", ':', '__salt__', '[', "'panos.download_software_version'", ']', '(', 'version', '=', 'version', ',', 'synch', '=', 'synch', ')', '}', ')', 'versions', '=', '__salt__', '[', "'panos.get_software_info'", ']', '(', ')', '[', "'result'", ']', 'if', "'sw-updates'", 'not', 'in', 'versions', 'or', "'versions'", 'not', 'in', 'versions', '[', "'sw-updates'", ']', 'or', "'entry'", 'not', 'in', 'versions', '[', "'sw-updates'", ']', '[', "'versions'", ']', ':', 'ret', '.', 'update', '(', '{', "'result'", ':', 'False', '}', ')', 'return', 'ret', 'for', 'entry', 'in', 'versions', '[', "'sw-updates'", ']', '[', "'versions'", ']', '[', "'entry'", ']', ':', 'if', 'entry', '[', "'version'", ']', '==', 'version', 'and', 'entry', '[', "'downloaded'", ']', '==', '"yes"', ':', 'ret', '.', 'update', '(', '{', "'result'", ':', 'True', '}', ')', 'return', 'ret', 'return', 'ret'] | Ensures that a software version is downloaded.
name: The name of the module function to execute.
version(str): The software version to check. If this version is not already downloaded, it will attempt to download
the file from Palo Alto.
synch(bool): If true, after downloading the file it will be synched to its peer.
check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
Alto.
SLS Example:
.. code-block:: yaml
panos/version8.0.0:
panos.download_software:
- version: 8.0.0
- synch: False
- check: True | ['Ensures', 'that', 'a', 'software', 'version', 'is', 'downloaded', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/panos.py#L663-L733 |
6,789 | phoebe-project/phoebe2 | phoebe/frontend/bundle.py | Bundle.add_star | def add_star(self, component=None, **kwargs):
"""
Shortcut to :meth:`add_component` but with kind='star'
"""
kwargs.setdefault('component', component)
return self.add_component('star', **kwargs) | python | def add_star(self, component=None, **kwargs):
"""
Shortcut to :meth:`add_component` but with kind='star'
"""
kwargs.setdefault('component', component)
return self.add_component('star', **kwargs) | ['def', 'add_star', '(', 'self', ',', 'component', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '.', 'setdefault', '(', "'component'", ',', 'component', ')', 'return', 'self', '.', 'add_component', '(', "'star'", ',', '*', '*', 'kwargs', ')'] | Shortcut to :meth:`add_component` but with kind='star' | ['Shortcut', 'to', ':', 'meth', ':', 'add_component', 'but', 'with', 'kind', '=', 'star'] | train | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L1999-L2004 |
6,790 | miyakogi/wdom | wdom/element.py | getElementsByClassName | def getElementsByClassName(start_node: ParentNode, class_name: str
) -> NodeList:
"""Get child nodes which has ``class_name`` class attribute."""
classes = set(class_name.split(' '))
return getElementsBy(
start_node,
lambda node: classes.issubset(set(node.classList))
) | python | def getElementsByClassName(start_node: ParentNode, class_name: str
) -> NodeList:
"""Get child nodes which has ``class_name`` class attribute."""
classes = set(class_name.split(' '))
return getElementsBy(
start_node,
lambda node: classes.issubset(set(node.classList))
) | ['def', 'getElementsByClassName', '(', 'start_node', ':', 'ParentNode', ',', 'class_name', ':', 'str', ')', '->', 'NodeList', ':', 'classes', '=', 'set', '(', 'class_name', '.', 'split', '(', "' '", ')', ')', 'return', 'getElementsBy', '(', 'start_node', ',', 'lambda', 'node', ':', 'classes', '.', 'issubset', '(', 'set', '(', 'node', '.', 'classList', ')', ')', ')'] | Get child nodes which has ``class_name`` class attribute. | ['Get', 'child', 'nodes', 'which', 'has', 'class_name', 'class', 'attribute', '.'] | train | https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L402-L409 |
6,791 | gc3-uzh-ch/elasticluster | elasticluster/providers/openstack.py | OpenStackCloudProvider.start_instance | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username=None, node_name=None, **kwargs):
"""Starts a new instance on the cloud using the given properties.
The following tasks are done to start an instance:
* establish a connection to the cloud web service
* check ssh keypair and upload it if it does not yet exist. This is
a locked process, since this function might be called in multiple
threads and we only want the key to be stored once.
* check if the security group exists
* run the instance with the given properties
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_id: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance
"""
self._init_os_api()
vm_start_args = {}
log.debug("Checking keypair `%s` ...", key_name)
with OpenStackCloudProvider.__node_start_lock:
self._check_keypair(key_name, public_key_path, private_key_path)
vm_start_args['key_name'] = key_name
security_groups = [sg.strip() for sg in security_group.split(',')]
self._check_security_groups(security_groups)
vm_start_args['security_groups'] = security_groups
# Check if the image id is present.
if image_id not in [img.id for img in self._get_images()]:
raise ImageError(
"No image found with ID `{0}` in project `{1}` of cloud {2}"
.format(image_id, self._os_tenant_name, self._os_auth_url))
vm_start_args['userdata'] = image_userdata
# Check if the flavor exists
flavors = [fl for fl in self._get_flavors() if fl.name == flavor]
if not flavors:
raise FlavorError(
"No flavor found with name `{0}` in project `{1}` of cloud {2}"
.format(flavor, self._os_tenant_name, self._os_auth_url))
flavor = flavors[0]
availability_zone = kwargs.pop('availability_zone','')
vm_start_args['availability_zone']=availability_zone
network_ids = [net_id.strip()
for net_id in kwargs.pop('network_ids', '').split(',')]
if network_ids:
nics = [{'net-id': net_id, 'v4-fixed-ip': ''}
for net_id in network_ids ]
log.debug("Specifying networks for node %s: %s",
node_name, ', '.join([nic['net-id'] for nic in nics]))
else:
nics = None
vm_start_args['nics'] = nics
if 'boot_disk_size' in kwargs:
# check if the backing volume is already there
volume_name = '{name}-{id}'.format(name=node_name, id=image_id)
if volume_name in [v.name for v in self._get_volumes()]:
raise ImageError(
"Volume `{0}` already exists in project `{1}` of cloud {2}"
.format(volume_name, self._os_tenant_name, self._os_auth_url))
log.info('Creating volume `%s` to use as VM disk ...', volume_name)
try:
bds = int(kwargs['boot_disk_size'])
if bds < 1:
raise ValueError('non-positive int')
except (ValueError, TypeError):
raise ConfigurationError(
"Invalid `boot_disk_size` specified:"
" should be a positive integer, got {0} instead"
.format(kwargs['boot_disk_size']))
volume = self.cinder_client.volumes.create(
size=bds, name=volume_name, imageRef=image_id,
volume_type=kwargs.pop('boot_disk_type'))
# wait for volume to come up
volume_available = False
while not volume_available:
for v in self._get_volumes():
if v.name == volume_name and v.status == 'available':
volume_available = True
break
sleep(1) # FIXME: hard-coded waiting time
# ok, use volume as VM disk
vm_start_args['block_device_mapping'] = {
# FIXME: is it possible that `vda` is not the boot disk? e.g. if
# a non-paravirtualized kernel is being used? should we allow
# to set the boot device as an image parameter?
'vda': ('{id}:::{delete_on_terminate}'
.format(id=volume.id, delete_on_terminate=1)),
}
# due to some `nova_client.servers.create()` implementation weirdness,
# the first three args need to be spelt out explicitly and cannot be
# conflated into `**vm_start_args`
vm = self.nova_client.servers.create(node_name, image_id, flavor, **vm_start_args)
# allocate and attach a floating IP, if requested
request_floating_ip = kwargs.get(
'request_floating_ip',
self._request_floating_ip_default)
if request_floating_ip:
# wait for server to come up (otherwise floating IP can't be associated)
log.info("Waiting for instance `%s` (%s) to come up ...", node_name, vm.id)
max_wait = int(kwargs.get('max_wait', 300))
waited = 0
while waited < max_wait:
if vm.status == 'ACTIVE':
break
if vm.status == 'ERROR':
raise RuntimeError(
"Failed to start VM {0}:"
" OpenStack scheduling error."
.format(vm.id))
vm = self.nova_client.servers.get(vm.id)
# FIXME: Configurable poll interval
sleep(3)
waited += 3
else:
raise RuntimeError(
"VM {0} didn't come up in {1:d} seconds"
.format(vm.id, max_wait))
# We need to list the floating IPs for this instance
try:
# python-novaclient <8.0.0
floating_ips = [ip for ip in self.nova_client.floating_ips.list()
if ip.instance_id == vm.id]
except AttributeError:
floating_ips = (
self.neutron_client
.list_floatingips(id=vm.id)
.get('floating_ips', []))
# allocate new floating IP if none given
if not floating_ips:
if 'floating_network_id' in kwargs:
floating_networks = [kwargs.pop('floating_network_id')]
else:
floating_networks = network_ids[:]
ip_addr = self._allocate_address(vm, floating_networks)
log.debug("VM `%s` was allocated floating IP: %r", vm.id, ip_addr)
else:
log.debug("VM `%s` already allocated floating IPs: %r", vm.id, floating_ips)
self._instances[vm.id] = vm
return vm.id | python | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username=None, node_name=None, **kwargs):
"""Starts a new instance on the cloud using the given properties.
The following tasks are done to start an instance:
* establish a connection to the cloud web service
* check ssh keypair and upload it if it does not yet exist. This is
a locked process, since this function might be called in multiple
threads and we only want the key to be stored once.
* check if the security group exists
* run the instance with the given properties
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_id: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance
"""
self._init_os_api()
vm_start_args = {}
log.debug("Checking keypair `%s` ...", key_name)
with OpenStackCloudProvider.__node_start_lock:
self._check_keypair(key_name, public_key_path, private_key_path)
vm_start_args['key_name'] = key_name
security_groups = [sg.strip() for sg in security_group.split(',')]
self._check_security_groups(security_groups)
vm_start_args['security_groups'] = security_groups
# Check if the image id is present.
if image_id not in [img.id for img in self._get_images()]:
raise ImageError(
"No image found with ID `{0}` in project `{1}` of cloud {2}"
.format(image_id, self._os_tenant_name, self._os_auth_url))
vm_start_args['userdata'] = image_userdata
# Check if the flavor exists
flavors = [fl for fl in self._get_flavors() if fl.name == flavor]
if not flavors:
raise FlavorError(
"No flavor found with name `{0}` in project `{1}` of cloud {2}"
.format(flavor, self._os_tenant_name, self._os_auth_url))
flavor = flavors[0]
availability_zone = kwargs.pop('availability_zone','')
vm_start_args['availability_zone']=availability_zone
network_ids = [net_id.strip()
for net_id in kwargs.pop('network_ids', '').split(',')]
if network_ids:
nics = [{'net-id': net_id, 'v4-fixed-ip': ''}
for net_id in network_ids ]
log.debug("Specifying networks for node %s: %s",
node_name, ', '.join([nic['net-id'] for nic in nics]))
else:
nics = None
vm_start_args['nics'] = nics
if 'boot_disk_size' in kwargs:
# check if the backing volume is already there
volume_name = '{name}-{id}'.format(name=node_name, id=image_id)
if volume_name in [v.name for v in self._get_volumes()]:
raise ImageError(
"Volume `{0}` already exists in project `{1}` of cloud {2}"
.format(volume_name, self._os_tenant_name, self._os_auth_url))
log.info('Creating volume `%s` to use as VM disk ...', volume_name)
try:
bds = int(kwargs['boot_disk_size'])
if bds < 1:
raise ValueError('non-positive int')
except (ValueError, TypeError):
raise ConfigurationError(
"Invalid `boot_disk_size` specified:"
" should be a positive integer, got {0} instead"
.format(kwargs['boot_disk_size']))
volume = self.cinder_client.volumes.create(
size=bds, name=volume_name, imageRef=image_id,
volume_type=kwargs.pop('boot_disk_type'))
# wait for volume to come up
volume_available = False
while not volume_available:
for v in self._get_volumes():
if v.name == volume_name and v.status == 'available':
volume_available = True
break
sleep(1) # FIXME: hard-coded waiting time
# ok, use volume as VM disk
vm_start_args['block_device_mapping'] = {
# FIXME: is it possible that `vda` is not the boot disk? e.g. if
# a non-paravirtualized kernel is being used? should we allow
# to set the boot device as an image parameter?
'vda': ('{id}:::{delete_on_terminate}'
.format(id=volume.id, delete_on_terminate=1)),
}
# due to some `nova_client.servers.create()` implementation weirdness,
# the first three args need to be spelt out explicitly and cannot be
# conflated into `**vm_start_args`
vm = self.nova_client.servers.create(node_name, image_id, flavor, **vm_start_args)
# allocate and attach a floating IP, if requested
request_floating_ip = kwargs.get(
'request_floating_ip',
self._request_floating_ip_default)
if request_floating_ip:
# wait for server to come up (otherwise floating IP can't be associated)
log.info("Waiting for instance `%s` (%s) to come up ...", node_name, vm.id)
max_wait = int(kwargs.get('max_wait', 300))
waited = 0
while waited < max_wait:
if vm.status == 'ACTIVE':
break
if vm.status == 'ERROR':
raise RuntimeError(
"Failed to start VM {0}:"
" OpenStack scheduling error."
.format(vm.id))
vm = self.nova_client.servers.get(vm.id)
# FIXME: Configurable poll interval
sleep(3)
waited += 3
else:
raise RuntimeError(
"VM {0} didn't come up in {1:d} seconds"
.format(vm.id, max_wait))
# We need to list the floating IPs for this instance
try:
# python-novaclient <8.0.0
floating_ips = [ip for ip in self.nova_client.floating_ips.list()
if ip.instance_id == vm.id]
except AttributeError:
floating_ips = (
self.neutron_client
.list_floatingips(id=vm.id)
.get('floating_ips', []))
# allocate new floating IP if none given
if not floating_ips:
if 'floating_network_id' in kwargs:
floating_networks = [kwargs.pop('floating_network_id')]
else:
floating_networks = network_ids[:]
ip_addr = self._allocate_address(vm, floating_networks)
log.debug("VM `%s` was allocated floating IP: %r", vm.id, ip_addr)
else:
log.debug("VM `%s` already allocated floating IPs: %r", vm.id, floating_ips)
self._instances[vm.id] = vm
return vm.id | ['def', 'start_instance', '(', 'self', ',', 'key_name', ',', 'public_key_path', ',', 'private_key_path', ',', 'security_group', ',', 'flavor', ',', 'image_id', ',', 'image_userdata', ',', 'username', '=', 'None', ',', 'node_name', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', '_init_os_api', '(', ')', 'vm_start_args', '=', '{', '}', 'log', '.', 'debug', '(', '"Checking keypair `%s` ..."', ',', 'key_name', ')', 'with', 'OpenStackCloudProvider', '.', '__node_start_lock', ':', 'self', '.', '_check_keypair', '(', 'key_name', ',', 'public_key_path', ',', 'private_key_path', ')', 'vm_start_args', '[', "'key_name'", ']', '=', 'key_name', 'security_groups', '=', '[', 'sg', '.', 'strip', '(', ')', 'for', 'sg', 'in', 'security_group', '.', 'split', '(', "','", ')', ']', 'self', '.', '_check_security_groups', '(', 'security_groups', ')', 'vm_start_args', '[', "'security_groups'", ']', '=', 'security_groups', '# Check if the image id is present.', 'if', 'image_id', 'not', 'in', '[', 'img', '.', 'id', 'for', 'img', 'in', 'self', '.', '_get_images', '(', ')', ']', ':', 'raise', 'ImageError', '(', '"No image found with ID `{0}` in project `{1}` of cloud {2}"', '.', 'format', '(', 'image_id', ',', 'self', '.', '_os_tenant_name', ',', 'self', '.', '_os_auth_url', ')', ')', 'vm_start_args', '[', "'userdata'", ']', '=', 'image_userdata', '# Check if the flavor exists', 'flavors', '=', '[', 'fl', 'for', 'fl', 'in', 'self', '.', '_get_flavors', '(', ')', 'if', 'fl', '.', 'name', '==', 'flavor', ']', 'if', 'not', 'flavors', ':', 'raise', 'FlavorError', '(', '"No flavor found with name `{0}` in project `{1}` of cloud {2}"', '.', 'format', '(', 'flavor', ',', 'self', '.', '_os_tenant_name', ',', 'self', '.', '_os_auth_url', ')', ')', 'flavor', '=', 'flavors', '[', '0', ']', 'availability_zone', '=', 'kwargs', '.', 'pop', '(', "'availability_zone'", ',', "''", ')', 'vm_start_args', '[', "'availability_zone'", ']', '=', 'availability_zone', 'network_ids', '=', '[', 'net_id', '.', 'strip', '(', ')', 'for', 'net_id', 'in', 'kwargs', '.', 'pop', '(', "'network_ids'", ',', "''", ')', '.', 'split', '(', "','", ')', ']', 'if', 'network_ids', ':', 'nics', '=', '[', '{', "'net-id'", ':', 'net_id', ',', "'v4-fixed-ip'", ':', "''", '}', 'for', 'net_id', 'in', 'network_ids', ']', 'log', '.', 'debug', '(', '"Specifying networks for node %s: %s"', ',', 'node_name', ',', "', '", '.', 'join', '(', '[', 'nic', '[', "'net-id'", ']', 'for', 'nic', 'in', 'nics', ']', ')', ')', 'else', ':', 'nics', '=', 'None', 'vm_start_args', '[', "'nics'", ']', '=', 'nics', 'if', "'boot_disk_size'", 'in', 'kwargs', ':', '# check if the backing volume is already there', 'volume_name', '=', "'{name}-{id}'", '.', 'format', '(', 'name', '=', 'node_name', ',', 'id', '=', 'image_id', ')', 'if', 'volume_name', 'in', '[', 'v', '.', 'name', 'for', 'v', 'in', 'self', '.', '_get_volumes', '(', ')', ']', ':', 'raise', 'ImageError', '(', '"Volume `{0}` already exists in project `{1}` of cloud {2}"', '.', 'format', '(', 'volume_name', ',', 'self', '.', '_os_tenant_name', ',', 'self', '.', '_os_auth_url', ')', ')', 'log', '.', 'info', '(', "'Creating volume `%s` to use as VM disk ...'", ',', 'volume_name', ')', 'try', ':', 'bds', '=', 'int', '(', 'kwargs', '[', "'boot_disk_size'", ']', ')', 'if', 'bds', '<', '1', ':', 'raise', 'ValueError', '(', "'non-positive int'", ')', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', 'raise', 'ConfigurationError', '(', '"Invalid `boot_disk_size` specified:"', '" should be a positive 
integer, got {0} instead"', '.', 'format', '(', 'kwargs', '[', "'boot_disk_size'", ']', ')', ')', 'volume', '=', 'self', '.', 'cinder_client', '.', 'volumes', '.', 'create', '(', 'size', '=', 'bds', ',', 'name', '=', 'volume_name', ',', 'imageRef', '=', 'image_id', ',', 'volume_type', '=', 'kwargs', '.', 'pop', '(', "'boot_disk_type'", ')', ')', '# wait for volume to come up', 'volume_available', '=', 'False', 'while', 'not', 'volume_available', ':', 'for', 'v', 'in', 'self', '.', '_get_volumes', '(', ')', ':', 'if', 'v', '.', 'name', '==', 'volume_name', 'and', 'v', '.', 'status', '==', "'available'", ':', 'volume_available', '=', 'True', 'break', 'sleep', '(', '1', ')', '# FIXME: hard-coded waiting time', '# ok, use volume as VM disk', 'vm_start_args', '[', "'block_device_mapping'", ']', '=', '{', '# FIXME: is it possible that `vda` is not the boot disk? e.g. if', '# a non-paravirtualized kernel is being used? should we allow', '# to set the boot device as an image parameter?', "'vda'", ':', '(', "'{id}:::{delete_on_terminate}'", '.', 'format', '(', 'id', '=', 'volume', '.', 'id', ',', 'delete_on_terminate', '=', '1', ')', ')', ',', '}', '# due to some `nova_client.servers.create()` implementation weirdness,', '# the first three args need to be spelt out explicitly and cannot be', '# conflated into `**vm_start_args`', 'vm', '=', 'self', '.', 'nova_client', '.', 'servers', '.', 'create', '(', 'node_name', ',', 'image_id', ',', 'flavor', ',', '*', '*', 'vm_start_args', ')', '# allocate and attach a floating IP, if requested', 'request_floating_ip', '=', 'kwargs', '.', 'get', '(', "'request_floating_ip'", ',', 'self', '.', '_request_floating_ip_default', ')', 'if', 'request_floating_ip', ':', "# wait for server to come up (otherwise floating IP can't be associated)", 'log', '.', 'info', '(', '"Waiting for instance `%s` (%s) to come up ..."', ',', 'node_name', ',', 'vm', '.', 'id', ')', 'max_wait', '=', 'int', '(', 'kwargs', '.', 'get', '(', "'max_wait'", ',', '300', ')', ')', 'waited', '=', '0', 'while', 'waited', '<', 'max_wait', ':', 'if', 'vm', '.', 'status', '==', "'ACTIVE'", ':', 'break', 'if', 'vm', '.', 'status', '==', "'ERROR'", ':', 'raise', 'RuntimeError', '(', '"Failed to start VM {0}:"', '" OpenStack scheduling error."', '.', 'format', '(', 'vm', '.', 'id', ')', ')', 'vm', '=', 'self', '.', 'nova_client', '.', 'servers', '.', 'get', '(', 'vm', '.', 'id', ')', '# FIXME: Configurable poll interval', 'sleep', '(', '3', ')', 'waited', '+=', '3', 'else', ':', 'raise', 'RuntimeError', '(', '"VM {0} didn\'t come up in {1:d} seconds"', '.', 'format', '(', 'vm', '.', 'id', ',', 'max_wait', ')', ')', '# We need to list the floating IPs for this instance', 'try', ':', '# python-novaclient <8.0.0', 'floating_ips', '=', '[', 'ip', 'for', 'ip', 'in', 'self', '.', 'nova_client', '.', 'floating_ips', '.', 'list', '(', ')', 'if', 'ip', '.', 'instance_id', '==', 'vm', '.', 'id', ']', 'except', 'AttributeError', ':', 'floating_ips', '=', '(', 'self', '.', 'neutron_client', '.', 'list_floatingips', '(', 'id', '=', 'vm', '.', 'id', ')', '.', 'get', '(', "'floating_ips'", ',', '[', ']', ')', ')', '# allocate new floating IP if none given', 'if', 'not', 'floating_ips', ':', 'if', "'floating_network_id'", 'in', 'kwargs', ':', 'floating_networks', '=', '[', 'kwargs', '.', 'pop', '(', "'floating_network_id'", ')', ']', 'else', ':', 'floating_networks', '=', 'network_ids', '[', ':', ']', 'ip_addr', '=', 'self', '.', '_allocate_address', '(', 'vm', ',', 'floating_networks', ')', 'log', '.', 'debug', '(', 
'"VM `%s` was allocated floating IP: %r"', ',', 'vm', '.', 'id', ',', 'ip_addr', ')', 'else', ':', 'log', '.', 'debug', '(', '"VM `%s` already allocated floating IPs: %r"', ',', 'vm', '.', 'id', ',', 'floating_ips', ')', 'self', '.', '_instances', '[', 'vm', '.', 'id', ']', '=', 'vm', 'return', 'vm', '.', 'id'] | Starts a new instance on the cloud using the given properties.
The following tasks are done to start an instance:
* establish a connection to the cloud web service
* check ssh keypair and upload it if it does not yet exist. This is
a locked process, since this function might be called in multiple
threads and we only want the key to be stored once.
* check if the security group exists
* run the instance with the given properties
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_id: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance | ['Starts', 'a', 'new', 'instance', 'on', 'the', 'cloud', 'using', 'the', 'given', 'properties', '.', 'The', 'following', 'tasks', 'are', 'done', 'to', 'start', 'an', 'instance', ':'] | train | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L440-L604 |
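A condensed sketch of driving the `start_instance` method above; the provider object is assumed to be configured elsewhere, and every identifier below is a placeholder rather than a value from the record.
.. code-block:: python

    # `provider` is an OpenStackCloudProvider configured elsewhere (auth URL, project, credentials, ...).
    vm_id = provider.start_instance(
        key_name='elasticluster',
        public_key_path='~/.ssh/id_rsa.pub',
        private_key_path='~/.ssh/id_rsa',
        security_group='default',
        flavor='m1.small',
        image_id='11111111-2222-3333-4444-555555555555',        # hypothetical Glance image UUID
        image_userdata='',
        node_name='compute001',
        network_ids='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',      # hypothetical Neutron network ID
        request_floating_ip=True,
    )
    print("started instance", vm_id)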
6,792 | ambitioninc/django-entity | entity/sync.py | _get_model_objs_to_sync | def _get_model_objs_to_sync(model_ids_to_sync, model_objs_map, sync_all):
"""
Given the model IDs to sync, fetch all model objects to sync
"""
model_objs_to_sync = {}
for ctype, model_ids_to_sync_for_ctype in model_ids_to_sync.items():
model_qset = entity_registry.entity_registry.get(ctype.model_class()).queryset
if not sync_all:
model_objs_to_sync[ctype] = model_qset.filter(id__in=model_ids_to_sync_for_ctype)
else:
model_objs_to_sync[ctype] = [
model_objs_map[ctype, model_id] for model_id in model_ids_to_sync_for_ctype
]
return model_objs_to_sync | python | def _get_model_objs_to_sync(model_ids_to_sync, model_objs_map, sync_all):
"""
Given the model IDs to sync, fetch all model objects to sync
"""
model_objs_to_sync = {}
for ctype, model_ids_to_sync_for_ctype in model_ids_to_sync.items():
model_qset = entity_registry.entity_registry.get(ctype.model_class()).queryset
if not sync_all:
model_objs_to_sync[ctype] = model_qset.filter(id__in=model_ids_to_sync_for_ctype)
else:
model_objs_to_sync[ctype] = [
model_objs_map[ctype, model_id] for model_id in model_ids_to_sync_for_ctype
]
return model_objs_to_sync | ['def', '_get_model_objs_to_sync', '(', 'model_ids_to_sync', ',', 'model_objs_map', ',', 'sync_all', ')', ':', 'model_objs_to_sync', '=', '{', '}', 'for', 'ctype', ',', 'model_ids_to_sync_for_ctype', 'in', 'model_ids_to_sync', '.', 'items', '(', ')', ':', 'model_qset', '=', 'entity_registry', '.', 'entity_registry', '.', 'get', '(', 'ctype', '.', 'model_class', '(', ')', ')', '.', 'queryset', 'if', 'not', 'sync_all', ':', 'model_objs_to_sync', '[', 'ctype', ']', '=', 'model_qset', '.', 'filter', '(', 'id__in', '=', 'model_ids_to_sync_for_ctype', ')', 'else', ':', 'model_objs_to_sync', '[', 'ctype', ']', '=', '[', 'model_objs_map', '[', 'ctype', ',', 'model_id', ']', 'for', 'model_id', 'in', 'model_ids_to_sync_for_ctype', ']', 'return', 'model_objs_to_sync'] | Given the model IDs to sync, fetch all model objects to sync | ['Given', 'the', 'model', 'IDs', 'to', 'sync', 'fetch', 'all', 'model', 'objects', 'to', 'sync'] | train | https://github.com/ambitioninc/django-entity/blob/ebc61f34313c52f4ef5819eb1da25b2ad837e80c/entity/sync.py#L120-L135 |
6,793 | django-fluent/django-fluent-contents | fluent_contents/extensions/pluginpool.py | PluginPool.get_model_classes | def get_model_classes(self):
"""
Return all :class:`~fluent_contents.models.ContentItem` model classes which are exposed by plugins.
"""
self._import_plugins()
return [plugin.model for plugin in self.plugins.values()] | python | def get_model_classes(self):
"""
Return all :class:`~fluent_contents.models.ContentItem` model classes which are exposed by plugins.
"""
self._import_plugins()
return [plugin.model for plugin in self.plugins.values()] | ['def', 'get_model_classes', '(', 'self', ')', ':', 'self', '.', '_import_plugins', '(', ')', 'return', '[', 'plugin', '.', 'model', 'for', 'plugin', 'in', 'self', '.', 'plugins', '.', 'values', '(', ')', ']'] | Return all :class:`~fluent_contents.models.ContentItem` model classes which are exposed by plugins. | ['Return', 'all', ':', 'class', ':', '~fluent_contents', '.', 'models', '.', 'ContentItem', 'model', 'classes', 'which', 'are', 'exposed', 'by', 'plugins', '.'] | train | https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginpool.py#L147-L152 |
6,794 | GNS3/gns3-server | gns3server/compute/vmware/vmware_vm.py | VMwareVM.adapter_type | def adapter_type(self, adapter_type):
"""
Sets the adapter type for this VMware VM instance.
:param adapter_type: adapter type (string)
"""
self._adapter_type = adapter_type
log.info("VMware VM '{name}' [{id}]: adapter type changed to {adapter_type}".format(name=self.name,
id=self.id,
adapter_type=adapter_type)) | python | def adapter_type(self, adapter_type):
"""
Sets the adapter type for this VMware VM instance.
:param adapter_type: adapter type (string)
"""
self._adapter_type = adapter_type
log.info("VMware VM '{name}' [{id}]: adapter type changed to {adapter_type}".format(name=self.name,
id=self.id,
adapter_type=adapter_type)) | ['def', 'adapter_type', '(', 'self', ',', 'adapter_type', ')', ':', 'self', '.', '_adapter_type', '=', 'adapter_type', 'log', '.', 'info', '(', '"VMware VM \'{name}\' [{id}]: adapter type changed to {adapter_type}"', '.', 'format', '(', 'name', '=', 'self', '.', 'name', ',', 'id', '=', 'self', '.', 'id', ',', 'adapter_type', '=', 'adapter_type', ')', ')'] | Sets the adapter type for this VMware VM instance.
:param adapter_type: adapter type (string) | ['Sets', 'the', 'adapter', 'type', 'for', 'this', 'VMware', 'VM', 'instance', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/vmware_vm.py#L686-L696 |
6,795 | UCL-INGI/INGInious | inginious/frontend/pages/course_admin/webdav.py | WebDavInfoPage.page | def page(self, course):
""" Get all data and display the page """
if not self.webdav_host:
raise web.notfound()
url = self.webdav_host + "/" + course.get_id()
username = self.user_manager.session_username()
apikey = self.user_manager.session_api_key()
return self.template_helper.get_renderer().course_admin.webdav(course, url, username, apikey) | python | def page(self, course):
""" Get all data and display the page """
if not self.webdav_host:
raise web.notfound()
url = self.webdav_host + "/" + course.get_id()
username = self.user_manager.session_username()
apikey = self.user_manager.session_api_key()
return self.template_helper.get_renderer().course_admin.webdav(course, url, username, apikey) | ['def', 'page', '(', 'self', ',', 'course', ')', ':', 'if', 'not', 'self', '.', 'webdav_host', ':', 'raise', 'web', '.', 'notfound', '(', ')', 'url', '=', 'self', '.', 'webdav_host', '+', '"/"', '+', 'course', '.', 'get_id', '(', ')', 'username', '=', 'self', '.', 'user_manager', '.', 'session_username', '(', ')', 'apikey', '=', 'self', '.', 'user_manager', '.', 'session_api_key', '(', ')', 'return', 'self', '.', 'template_helper', '.', 'get_renderer', '(', ')', '.', 'course_admin', '.', 'webdav', '(', 'course', ',', 'url', ',', 'username', ',', 'apikey', ')'] | Get all data and display the page | ['Get', 'all', 'data', 'and', 'display', 'the', 'page'] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/webdav.py#L19-L27 |
6,796 | pandas-dev/pandas | pandas/core/series.py | Series.update | def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher() | python | def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher() | ['def', 'update', '(', 'self', ',', 'other', ')', ':', 'other', '=', 'other', '.', 'reindex_like', '(', 'self', ')', 'mask', '=', 'notna', '(', 'other', ')', 'self', '.', '_data', '=', 'self', '.', '_data', '.', 'putmask', '(', 'mask', '=', 'mask', ',', 'new', '=', 'other', ',', 'inplace', '=', 'True', ')', 'self', '.', '_maybe_update_cacher', '(', ')'] | Modify Series in place using non-NA values from passed
Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64 | ['Modify', 'Series', 'in', 'place', 'using', 'non', '-', 'NA', 'values', 'from', 'passed', 'Series', '.', 'Aligns', 'on', 'index', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2692-L2742 |
6,797 | gagneurlab/concise | concise/legacy/concise.py | Concise.get_weights | def get_weights(self):
"""
Returns:
dict: Model's trained weights.
"""
if self.is_trained() is False:
# print("Model not fitted yet. Use object.fit() to fit the model.")
return None
var_res = self._var_res
weights = self._var_res_to_weights(var_res)
# save to the side
weights["final_bias_fit"] = weights["final_bias"]
weights["feature_weights_fit"] = weights["feature_weights"]
return weights | python | def get_weights(self):
"""
Returns:
dict: Model's trained weights.
"""
if self.is_trained() is False:
# print("Model not fitted yet. Use object.fit() to fit the model.")
return None
var_res = self._var_res
weights = self._var_res_to_weights(var_res)
# save to the side
weights["final_bias_fit"] = weights["final_bias"]
weights["feature_weights_fit"] = weights["feature_weights"]
return weights | ['def', 'get_weights', '(', 'self', ')', ':', 'if', 'self', '.', 'is_trained', '(', ')', 'is', 'False', ':', '# print("Model not fitted yet. Use object.fit() to fit the model.")', 'return', 'None', 'var_res', '=', 'self', '.', '_var_res', 'weights', '=', 'self', '.', '_var_res_to_weights', '(', 'var_res', ')', '# save to the side', 'weights', '[', '"final_bias_fit"', ']', '=', 'weights', '[', '"final_bias"', ']', 'weights', '[', '"feature_weights_fit"', ']', '=', 'weights', '[', '"feature_weights"', ']', 'return', 'weights'] | Returns:
dict: Model's trained weights. | ['Returns', ':', 'dict', ':', 'Model', 's', 'trained', 'weights', '.'] | train | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L412-L427 |
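A hedged sketch (assumed constructor and fit signature, not taken from this row) of the get_weights behaviour documented above: None before fitting, a dict of trained weights afterwards.

# Illustrative only; Concise() defaults and the fit() arguments are assumptions.
from concise.legacy.concise import Concise

model = Concise()
print(model.get_weights())   # None — the model is not trained yet
# After a hypothetical model.fit(...) call, get_weights() returns a dict whose
# keys include "feature_weights" and "final_bias" (plus the *_fit copies set above).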
6,798 | annayqho/TheCannon | TheCannon/find_continuum_pixels.py | _find_contpix | def _find_contpix(wl, fluxes, ivars, target_frac):
""" Find continuum pix in spec, meeting a set target fraction
Parameters
----------
wl: numpy ndarray
rest-frame wavelength vector
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
target_frac: float
the fraction of pixels in spectrum desired to be continuum
Returns
-------
contmask: boolean numpy ndarray
True corresponds to continuum pixels
"""
print("Target frac: %s" %(target_frac))
bad1 = np.median(ivars, axis=0) == SMALL
bad2 = np.var(ivars, axis=0) == 0
bad = np.logical_and(bad1, bad2)
npixels = len(wl)-sum(bad)
f_cut = 0.0001
stepsize = 0.0001
sig_cut = 0.0001
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
while (frac < target_frac):
f_cut += stepsize
sig_cut += stepsize
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
if frac > 0.10*npixels:
print("Warning: Over 10% of pixels identified as continuum.")
print("%s out of %s pixels identified as continuum" %(sum(contmask),
npixels))
print("Cuts: f_cut %s, sig_cut %s" %(f_cut, sig_cut))
return contmask | python | def _find_contpix(wl, fluxes, ivars, target_frac):
""" Find continuum pix in spec, meeting a set target fraction
Parameters
----------
wl: numpy ndarray
rest-frame wavelength vector
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
target_frac: float
the fraction of pixels in spectrum desired to be continuum
Returns
-------
contmask: boolean numpy ndarray
True corresponds to continuum pixels
"""
print("Target frac: %s" %(target_frac))
bad1 = np.median(ivars, axis=0) == SMALL
bad2 = np.var(ivars, axis=0) == 0
bad = np.logical_and(bad1, bad2)
npixels = len(wl)-sum(bad)
f_cut = 0.0001
stepsize = 0.0001
sig_cut = 0.0001
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
while (frac < target_frac):
f_cut += stepsize
sig_cut += stepsize
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
if frac > 0.10*npixels:
print("Warning: Over 10% of pixels identified as continuum.")
print("%s out of %s pixels identified as continuum" %(sum(contmask),
npixels))
print("Cuts: f_cut %s, sig_cut %s" %(f_cut, sig_cut))
return contmask | ['def', '_find_contpix', '(', 'wl', ',', 'fluxes', ',', 'ivars', ',', 'target_frac', ')', ':', 'print', '(', '"Target frac: %s"', '%', '(', 'target_frac', ')', ')', 'bad1', '=', 'np', '.', 'median', '(', 'ivars', ',', 'axis', '=', '0', ')', '==', 'SMALL', 'bad2', '=', 'np', '.', 'var', '(', 'ivars', ',', 'axis', '=', '0', ')', '==', '0', 'bad', '=', 'np', '.', 'logical_and', '(', 'bad1', ',', 'bad2', ')', 'npixels', '=', 'len', '(', 'wl', ')', '-', 'sum', '(', 'bad', ')', 'f_cut', '=', '0.0001', 'stepsize', '=', '0.0001', 'sig_cut', '=', '0.0001', 'contmask', '=', '_find_contpix_given_cuts', '(', 'f_cut', ',', 'sig_cut', ',', 'wl', ',', 'fluxes', ',', 'ivars', ')', 'if', 'npixels', '>', '0', ':', 'frac', '=', 'sum', '(', 'contmask', ')', '/', 'float', '(', 'npixels', ')', 'else', ':', 'frac', '=', '0', 'while', '(', 'frac', '<', 'target_frac', ')', ':', 'f_cut', '+=', 'stepsize', 'sig_cut', '+=', 'stepsize', 'contmask', '=', '_find_contpix_given_cuts', '(', 'f_cut', ',', 'sig_cut', ',', 'wl', ',', 'fluxes', ',', 'ivars', ')', 'if', 'npixels', '>', '0', ':', 'frac', '=', 'sum', '(', 'contmask', ')', '/', 'float', '(', 'npixels', ')', 'else', ':', 'frac', '=', '0', 'if', 'frac', '>', '0.10', '*', 'npixels', ':', 'print', '(', '"Warning: Over 10% of pixels identified as continuum."', ')', 'print', '(', '"%s out of %s pixels identified as continuum"', '%', '(', 'sum', '(', 'contmask', ')', ',', 'npixels', ')', ')', 'print', '(', '"Cuts: f_cut %s, sig_cut %s"', '%', '(', 'f_cut', ',', 'sig_cut', ')', ')', 'return', 'contmask'] | Find continuum pix in spec, meeting a set target fraction
Parameters
----------
wl: numpy ndarray
rest-frame wavelength vector
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
target_frac: float
the fraction of pixels in spectrum desired to be continuum
Returns
-------
contmask: boolean numpy ndarray
True corresponds to continuum pixels | ['Find', 'continuum', 'pix', 'in', 'spec', 'meeting', 'a', 'set', 'target', 'fraction'] | train | https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/find_continuum_pixels.py#L37-L85 |
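A minimal sketch of calling _find_contpix with the array shapes implied by its docstring; the synthetic spectra and the 7% target fraction are assumptions.

# Illustrative only; builds a toy spectrum set matching the documented shapes.
import numpy as np
from TheCannon.find_continuum_pixels import _find_contpix

npix, nstars = 500, 20
wl = np.linspace(15100.0, 16900.0, npix)              # rest-frame wavelength vector
fluxes = np.random.normal(1.0, 0.01, (nstars, npix))  # pixel intensities, one row per star
ivars = np.full((nstars, npix), 1.0e4)                # inverse variances, parallel to fluxes
contmask = _find_contpix(wl, fluxes, ivars, target_frac=0.07)
print(sum(contmask), "of", npix, "pixels flagged as continuum")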
6,799 | etcher-be/emiz | emiz/avwx/core.py | get_taf_remarks | def get_taf_remarks(txt: str) -> (str, str): # type: ignore
"""
Returns report and remarks separated if found
"""
remarks_start = find_first_in_list(txt, TAF_RMK)
if remarks_start == -1:
return txt, ''
remarks = txt[remarks_start:]
txt = txt[:remarks_start].strip()
return txt, remarks | python | def get_taf_remarks(txt: str) -> (str, str): # type: ignore
"""
Returns report and remarks separated if found
"""
remarks_start = find_first_in_list(txt, TAF_RMK)
if remarks_start == -1:
return txt, ''
remarks = txt[remarks_start:]
txt = txt[:remarks_start].strip()
return txt, remarks | ['def', 'get_taf_remarks', '(', 'txt', ':', 'str', ')', '->', '(', 'str', ',', 'str', ')', ':', '# type: ignore', 'remarks_start', '=', 'find_first_in_list', '(', 'txt', ',', 'TAF_RMK', ')', 'if', 'remarks_start', '==', '-', '1', ':', 'return', 'txt', ',', "''", 'remarks', '=', 'txt', '[', 'remarks_start', ':', ']', 'txt', '=', 'txt', '[', ':', 'remarks_start', ']', '.', 'strip', '(', ')', 'return', 'txt', ',', 'remarks'] | Returns report and remarks separated if found | ['Returns', 'report', 'and', 'remarks', 'separated', 'if', 'found'] | train | https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/core.py#L165-L174 |