code (stringlengths 20 to 4.93k) | docstring (stringlengths 33 to 1.27k) | source (stringclasses 3 values) |
---|---|---|
def _get_path_params(match):
result = {}
for (var_name, value) in match.groupdict().iteritems():
actual_var_name = ApiConfigManager._from_safe_path_param_name(var_name)
result[actual_var_name] = urllib.unquote_plus(value)
return result
|
Gets path parameters from a regular expression match.
Args:
match: A regular expression Match object for a path.
Returns:
A dictionary containing the variable names converted from base64.
|
codesearchnet
|
def infer_shapes(nlp: Pipeline, framework: str) -> tuple[list[str], list[str], dict, BatchEncoding]:
def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
if isinstance(tensor, (tuple, list)):
return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
else:
axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: 'batch'}
if is_input:
if len(tensor.shape) == 2:
axes[1] = 'sequence'
else:
raise ValueError(f'Unable to infer tensor axes ({len(tensor.shape)})')
else:
seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
axes.update(dict.fromkeys(seq_axes, 'sequence'))
print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
return axes
tokens = nlp.tokenizer('This is a sample output', return_tensors=framework)
seq_len = tokens.input_ids.shape[-1]
outputs = nlp.model(**tokens) if framework == 'pt' else nlp.model(tokens)
if isinstance(outputs, ModelOutput):
outputs = outputs.to_tuple()
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
input_vars = list(tokens.keys())
input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}
outputs_flat = []
for output in outputs:
if isinstance(output, (tuple, list)):
outputs_flat.extend(output)
else:
outputs_flat.append(output)
output_names = [f'output_{i}' for i in range(len(outputs_flat))]
output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}
dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
return (input_vars, output_names, dynamic_axes, tokens)
|
Attempt to infer the static vs dynamic axes for each input and output tensor of a specific model
Args:
nlp: The pipeline object holding the model to be exported
framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)
Returns:
- List of the inferred input variable names
- List of the inferred output variable names
- Dictionary with input/output variable names as keys and shape tensors as values
- a BatchEncoding reference which was used to infer all the above information
|
github-repos
|
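The row above defines a shape-inference helper in the style of transformers' ONNX export utilities. A minimal usage sketch, assuming torch and transformers are installed, the checkpoint below is reachable, and `infer_shapes` is importable from the surrounding module:
```python
from transformers import pipeline

# Hedged sketch: the checkpoint name and the expected outputs are illustrative.
nlp = pipeline("feature-extraction", model="distilbert-base-uncased", framework="pt")
input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")

print(input_names)    # e.g. ['input_ids', 'attention_mask']
print(output_names)   # e.g. ['output_0']
print(dynamic_axes)   # e.g. {'input_ids': {0: 'batch', 1: 'sequence'}, ...}
```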
def GetVolumeByIdentifier(self, volume_identifier):
if (not self._is_parsed):
self._Parse()
self._is_parsed = True
return self._volumes[volume_identifier]
|
Retrieves a specific volume based on the identifier.
Args:
volume_identifier (str): identifier of the volume within
the volume system.
Returns:
Volume: a volume.
|
codesearchnet
|
def pixel_shuffle(self, vision_features: torch.Tensor, scale_factor: float=0.5):
batch_size, width, height, channels = vision_features.size()
if height % scale_factor != 0 or width % scale_factor != 0:
raise ValueError('Height and width must be divisible by scale_factor for proper downsampling.')
vision_features = vision_features.view(batch_size, width, int(height * scale_factor), int(channels / scale_factor))
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
vision_features = vision_features.view(batch_size, int(height * scale_factor), int(width * scale_factor), int(channels / scale_factor ** 2))
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
return vision_features
|
Perform pixel shuffle downsampling on vision features.
Args:
vision_features (`torch.Tensor`):
Input tensor of shape (batch_size, width, height, channels).
scale_factor (`float`, *optional*, defaults to `0.5`):
Factor by which to downsample. Default is 0.5, which halves the dimensions.
Returns:
vision_features (`torch.Tensor`):
Downsampled tensor of shape (batch_size, height*scale_factor, width*scale_factor, channels/(scale_factor^2)).
|
github-repos
|
def _merge_precomputed_encodings(self, other, validate=True):
if self is other or (self._row_splits is other._row_splits and self._row_lengths is other._row_lengths and (self._value_rowids is other._value_rowids) and (self._nrows is other._nrows) and (self._nvals is other._nvals) and (self._uniform_row_length is other._uniform_row_length)):
return self
nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, 'nrows', validate)
nvals, _ = _merge_tensors(self._nvals, other._nvals, 'nvals', validate)
uniform_row_length, uniform_row_length_validated = _merge_tensors(self._uniform_row_length, other._uniform_row_length, 'uniform_row_length', validate)
if uniform_row_length_validated and nrows_validated:
validate = False
row_splits, row_splits_validated = _merge_tensors(self._row_splits, other._row_splits, 'row_splits', validate)
if row_splits_validated:
validate = False
row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, other._row_lengths, 'row_lengths', validate)
if row_lengths_validated:
validate = False
value_rowids, value_rowids_validated = _merge_tensors(self._value_rowids, other._value_rowids, 'value_rowids', validate)
if value_rowids_validated and nrows_validated:
validate = False
if row_splits is self._row_splits and row_lengths is self._row_lengths and (value_rowids is self._value_rowids) and (nrows is self._nrows) and (uniform_row_length is self._uniform_row_length):
return self
if row_splits is other._row_splits and row_lengths is other._row_lengths and (value_rowids is other._value_rowids) and (nrows is other._nrows) and (uniform_row_length is other._uniform_row_length):
return other
return RowPartition(row_splits=row_splits, row_lengths=row_lengths, value_rowids=value_rowids, nrows=nrows, uniform_row_length=uniform_row_length, nvals=nvals, internal=_row_partition_factory_key)
|
Returns a RowPartition that merges encodings from `self` and `other`.
Requires that `self` and `other` describe the same partition.
Args:
other: A `RowPartition` that encodes the same partition as `self`.
validate: If true, then add runtime checks to verify that `self` and
`other` encode the same row partition.
Returns:
A `RowPartition`.
|
github-repos
|
def disassemble(self, annotate=False, blocks=False):
ops = disassemble(self.co_code, self.internals)
if annotate:
ops = [self.annotate_op(op) for op in ops]
if blocks:
return blocks_from_ops(ops)
else:
return ops
|
Disassemble the bytecode of this code object into a series of
opcodes and labels. Can also annotate the opcodes and group
the opcodes into blocks based on the labels.
Arguments:
annotate(bool): Whether to annotate the operations.
blocks(bool): Whether to group the operations into blocks.
Returns:
list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances
and labels.
|
codesearchnet
|
def get_connection_id_by_endpoint(self, endpoint):
with self._connections_lock:
for connection_id in self._connections:
connection_info = self._connections[connection_id]
if (connection_info.uri == endpoint):
return connection_id
raise KeyError()
|
Returns the connection id associated with a publicly
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publicly
reachable endpoint.
|
codesearchnet
|
class CategoricalHinge(MeanMetricWrapper):
def __init__(self, name='categorical_hinge', dtype=None):
super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
|
Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.2
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalHinge()])
```
|
github-repos
|
def __init__(self, position=(0., 0., 0.), rotation=(0., 0., 0.), scale=1., orientation0=(1., 0., 0.),
**kwargs):
super(Physical, self).__init__(**kwargs)
self.orientation0 = np.array(orientation0, dtype=np.float32)
self.rotation = coordinates.RotationEulerDegrees(*rotation)
self.position = coordinates.Translation(*position)
if hasattr(scale, '__iter__'):
if 0 in scale:
raise ValueError("Scale can not be set to 0")
self.scale = coordinates.Scale(*scale)
else:
if scale == 0:
raise ValueError("Scale can not be set to 0")
self.scale = coordinates.Scale(scale)
self._model_matrix = np.identity(4, dtype=np.float32)
self._normal_matrix = np.identity(4, dtype=np.float32)
self._view_matrix = np.identity(4, dtype=np.float32)
|
XYZ Position, Scale and XYZEuler Rotation Class.
Args:
position: (x, y, z) translation values.
rotation: (x, y, z) rotation values
scale (float): uniform scale factor. 1 = no scaling.
|
juraj-google-style
|
def erfinv(x, name=None):
with ops.name_scope(name, 'erfinv', [x]):
return gen_math_ops.erfinv(x)
|
Compute inverse error function.
Given `x`, compute the inverse error function of `x`. This function
is the inverse of `tf.math.erf`.
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse error function of `x`.
|
github-repos
|
def multinomial_sample(x, vocab_size=None, sampling_method='random', temperature=1.0):
vocab_size = (vocab_size or common_layers.shape_list(x)[(- 1)])
if ((sampling_method == 'random') and (temperature > 0.0)):
samples = tf.multinomial((tf.reshape(x, [(- 1), vocab_size]) / temperature), 1)
else:
samples = tf.argmax(x, axis=(- 1))
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:(- 1)])
return reshaped_samples
|
Multinomial sampling from an n-dimensional tensor.
Args:
x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
vocab_size: Number of classes in multinomial distribution.
sampling_method: String, "random" or otherwise deterministic.
temperature: Positive float.
Returns:
Tensor of shape [...].
|
codesearchnet
|
def task_table(self, task_id=None):
self._check_connected()
if (task_id is not None):
task_id = ray.TaskID(hex_to_binary(task_id))
return self._task_table(task_id)
else:
task_table_keys = self._keys((ray.gcs_utils.TablePrefix_RAYLET_TASK_string + '*'))
task_ids_binary = [key[len(ray.gcs_utils.TablePrefix_RAYLET_TASK_string):] for key in task_table_keys]
results = {}
for task_id_binary in task_ids_binary:
results[binary_to_hex(task_id_binary)] = self._task_table(ray.TaskID(task_id_binary))
return results
|
Fetch and parse the task table information for one or more task IDs.
Args:
task_id: A hex string of the task ID to fetch information about. If
this is None, then the task object table is fetched.
Returns:
Information from the task table.
|
codesearchnet
|
def _search_step(self, state):
(new_seq, new_log_probs, new_cache) = self._grow_alive_seq(state)
alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_cache)
finished_state = self._get_new_finished_state(state, new_seq, new_log_probs)
new_state = {_StateKeys.CUR_INDEX: (state[_StateKeys.CUR_INDEX] + 1)}
new_state.update(alive_state)
new_state.update(finished_state)
return [new_state]
|
Beam search loop body.
Grow alive sequences by a single ID. Sequences that have reached the EOS
token are marked as finished. The alive and finished sequences with the
highest log probabilities and scores are returned.
A sequence's finished score is calculated by dividing the log probability
by the length normalization factor. Without length normalization, the
search is more likely to return shorter sequences.
Args:
state: A dictionary with the current loop state.
Returns:
new state dictionary.
|
codesearchnet
|
def get_location_from_HDX_code(code, locations=None, configuration=None):
if locations is None:
locations = Locations.validlocations(configuration)
for locdict in locations:
if code.upper() == locdict['name'].upper():
return locdict['title']
return None
|
Get location from HDX location code
Args:
code (str): code for which to get location name
locations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[str]: location name
|
juraj-google-style
|
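A self-contained sketch of the lookup above; the two-entry `locations` list is hypothetical and stands in for the list normally downloaded from HDX (the default path requires an HDX `Configuration` to have been created):
```python
# Hypothetical valid-locations list; real entries come from Locations.validlocations().
locations = [{'name': 'afg', 'title': 'Afghanistan'},
             {'name': 'pak', 'title': 'Pakistan'}]

get_location_from_HDX_code('AFG', locations=locations)   # -> 'Afghanistan'
get_location_from_HDX_code('xyz', locations=locations)   # -> None
```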
def from_obj(cls, cls_obj):
if (not cls_obj):
return None
typekey = cls.objkey(cls_obj)
klass = cls.entity_class(typekey)
return klass.from_obj(cls_obj)
|
Parse the generateDS object and return an Entity instance.
This will attempt to extract type information from the input
object and pass it to entity_class to resolve the correct class
for the type.
Args:
cls_obj: A generateDS object.
Returns:
An Entity instance.
|
codesearchnet
|
def upgrade(self, remote=None):
if self.enabled:
raise errors.DockerError(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
yield d
self._reload()
|
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs
|
juraj-google-style
|
def parse_view(query):
try:
idx = query.lower().index('where')
query = query[:idx]
except ValueError:
pass
if (not query.endswith(';')):
query = query.strip()
query += ';'
result = _view_stmt.parseString(query)
return View(result)
|
Parses asql query to view object.
Args:
query (str): asql query
Returns:
View instance: parsed view.
|
codesearchnet
|
def _RunOsLoginControl(self, params):
try:
return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params)
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise
|
Run the OS Login control script.
Args:
params: list, the params to pass to the script
Returns:
int, the return code from the call, or None if the script is not found.
|
juraj-google-style
|
def combine_metadata(*metadata_objects, **kwargs):
average_times = kwargs.get('average_times', True)
shared_keys = None
info_dicts = []
for metadata_object in metadata_objects:
if isinstance(metadata_object, dict):
metadata_dict = metadata_object
elif hasattr(metadata_object, 'attrs'):
metadata_dict = metadata_object.attrs
else:
continue
info_dicts.append(metadata_dict)
if (shared_keys is None):
shared_keys = set(metadata_dict.keys())
else:
shared_keys &= set(metadata_dict.keys())
shared_info = {}
for k in shared_keys:
values = [nfo[k] for nfo in info_dicts]
any_arrays = any([isinstance(val, np.ndarray) for val in values])
if any_arrays:
if all((np.all((val == values[0])) for val in values[1:])):
shared_info[k] = values[0]
elif (('time' in k) and isinstance(values[0], datetime) and average_times):
shared_info[k] = average_datetimes(values)
elif all(((val == values[0]) for val in values[1:])):
shared_info[k] = values[0]
return shared_info
|
Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of datetime objects will be averaged. This is to handle cases where
data were observed at almost the same time but not exactly.
Args:
*metadata_objects: MetadataObject or dict objects to combine
average_times (bool): Average any keys with 'time' in the name
Returns:
dict: the combined metadata
|
codesearchnet
|
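A minimal sketch of the combination rules above, assuming `combine_metadata` and its `average_datetimes` helper are importable from the same module:
```python
from datetime import datetime

a = {'sensor': 'viirs', 'units': 'K', 'start_time': datetime(2019, 1, 1, 12, 0, 0)}
b = {'sensor': 'viirs', 'units': 'W m-2', 'start_time': datetime(2019, 1, 1, 12, 0, 30)}

shared = combine_metadata(a, b)
# 'sensor' is kept (equal in both dicts), 'units' is dropped (values differ),
# and 'start_time' is averaged because the key contains 'time'.
```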
async def _overlap(items, overlap_attr, client=None, get_method=None):
overlap = set.intersection(*(getattr(item, overlap_attr) for item in items))
if ((client is None) or (get_method is None)):
return overlap
results = []
for item in overlap:
result = (await getattr(client, get_method)(id_=item.id_))
results.append(result)
return results
|
Generic overlap implementation.
Arguments:
items (:py:class:`collections.abc.Sequence`): The objects to
find overlaps for.
overlap_attr (:py:class:`str`): The attribute of the items to use
as input for the overlap.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
get_method (:py:class:`str`, optional): The method of the
client to use for extracting additional information.
Returns:
:py:class:`list`: The relevant result objects.
|
codesearchnet
|
class _TextEmbeddingHandler(_EmbeddingHandler):
def _validate_column_data(self, batch):
if not isinstance(batch[0], (str, bytes)):
raise TypeError(f'Embeddings can only be generated on dict[str, str].Got dict[str, {type(batch[0])}] instead.')
def get_metrics_namespace(self) -> str:
return self._underlying.get_metrics_namespace() or 'BeamML_TextEmbeddingHandler'
|
A ModelHandler intended to work on list[dict[str, str]] inputs.
The inputs to the model handler are expected to be a list of dicts.
For example, if the original model is used with RunInference to take a
PCollection[E] to a PCollection[P], this ModelHandler would take a
PCollection[dict[str, E]] to a PCollection[dict[str, P]].
_TextEmbeddingHandler will accept an EmbeddingsManager instance, which
contains the details of the model to be loaded and the inference_fn to be
used. The purpose of _TextEmbeddingHandler is to generate embeddings for
text inputs using the EmbeddingsManager instance.
If the input is not a text column, a RuntimeError will be raised.
This is an internal class and offers no backwards compatibility guarantees.
Args:
embeddings_manager: An EmbeddingsManager instance.
|
github-repos
|
def expectation(self, observable: Union[tf.Tensor, hamiltonian.Hamiltonian]):
raise NotImplementedError()
|
Take the expectation value of an observable against this dataset.
Args:
observable: Hermitian operator to measure. If `tf.Tensor`, it is of type
`tf.string` with shape [1], result of calling `tfq.convert_to_tensor`
on a list of `cirq.PauliSum`, `[op]`. Otherwise, a Hamiltonian.
Returns:
Scalar `tf.Tensor` which is the expectation value of `observable` against
this quantum data source.
|
github-repos
|
def _compute_nfps_real(counts, sizes):
nfps = np.zeros((len(sizes), len(sizes)))
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_real(l, u, counts, sizes)
return nfps
|
Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes.
Args:
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
|
juraj-google-style
|
def from_version(cls, version, op=None):
lower = None
upper = None
if op is None:
lower = _LowerBound(version, True)
upper = _UpperBound(version.next(), False)
elif op in ("eq", "=="):
lower = _LowerBound(version, True)
upper = _UpperBound(version, True)
elif op in ("gt", ">"):
lower = _LowerBound(version, False)
elif op in ("gte", ">="):
lower = _LowerBound(version, True)
elif op in ("lt", "<"):
upper = _UpperBound(version, False)
elif op in ("lte", "<="):
upper = _UpperBound(version, True)
else:
raise VersionError("Unknown bound operation '%s'" % op)
bound = _Bound(lower, upper)
range = cls(None)
range.bounds = [bound]
return range
|
Create a range from a version.
Args:
version: Version object. This is used as the upper/lower bound of
the range.
op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',
'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created
that contains the version superset.
Returns:
`VersionRange` object.
|
juraj-google-style
|
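Illustrative calls for the classmethod above; `Version` is assumed to be the companion version class from the same package:
```python
v = Version("1.2.3")   # hypothetical version object

VersionRange.from_version(v, op="gte")   # lower bound 1.2.3 (inclusive), no upper bound
VersionRange.from_version(v, op="lt")    # upper bound 1.2.3 (exclusive), no lower bound
VersionRange.from_version(v)             # bounded: from v (inclusive) up to v.next() (exclusive)
```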
def non_fluent_variables(self) -> FluentParamsList:
fluents = self.domain.non_fluents
ordering = self.domain.non_fluent_ordering
return self._fluent_params(fluents, ordering)
|
Returns the instantiated non-fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings.
|
codesearchnet
|
def omega(self, structure, n, u):
l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
l0 *= 1e-10
weight = (float(structure.composition.weight) * 1.66054e-27)
vol = (structure.volume * 1e-30)
vel = (((1000000000.0 * self[0].einsum_sequence([n, u, n, u])) / (weight / vol)) ** 0.5)
return (vel / l0)
|
Finds directional frequency contribution to the heat
capacity from direction and polarization
Args:
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
|
codesearchnet
|
def create_header(cls, request_id=None):
header = {
'msgid' : bkserial.make_id(),
'msgtype' : cls.msgtype
}
if request_id is not None:
header['reqid'] = request_id
return header
|
Return a message header fragment dict.
Args:
request_id (str or None) :
Message ID of the message this message replies to
Returns:
dict : a message header
|
juraj-google-style
|
def tensor_not_equals(self, other):
if other is None:
return True
if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
self, other = override_binary_operator.maybe_promote_tensors(self, other)
return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
else:
return self is not other
|
The operation invoked by the `Tensor.__ne__` operator.
Compares two tensors element-wise for inequality if they are
broadcast-compatible; or returns True if they are not broadcast-compatible.
(Note that this behavior differs from `tf.math.not_equal`, which raises an
exception if the two tensors are not broadcast-compatible.)
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__ne__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
self: The left-hand side of the `!=` operator.
other: The right-hand side of the `!=` operator.
Returns:
The result of the elementwise `!=` operation, or `True` if the arguments
are not broadcast-compatible.
|
github-repos
|
def logical_downlinks(self):
if (not self.__logical_downlinks):
self.__logical_downlinks = LogicalDownlinks(self.__connection)
return self.__logical_downlinks
|
Gets the LogicalDownlinks API client.
Returns:
LogicalDownlinks:
|
codesearchnet
|
def _remove_native_segments(input_func):
input_graph_def = input_func.graph.as_graph_def()
nodes_deleted = 0
for func_id in reversed(range(len(input_graph_def.library.function))):
f = input_graph_def.library.function[func_id]
if 'native_segment' in f.signature.name:
nodes_deleted += 1
while context.context().has_function(f.signature.name):
context.context().remove_function(f.signature.name)
del input_graph_def.library.function[func_id]
logging.info(f'Found and deleted native segments from {nodes_deleted} TRTEngineOp nodes.')
for node in input_graph_def.node:
if node.op == 'TRTEngineOp':
del node.attr['segment_func']
for func in input_graph_def.library.function:
for node in func.node_def:
if node.op == 'TRTEngineOp':
del node.attr['segment_func']
new_func = _construct_function_from_graph_def(input_func, input_graph_def)
return new_func
|
Remove native segments from the input TF-TRT Converted Function.
Args:
input_func: provide the concrete function with native segment nodes. The
transformed output func will not contain any native segment nodes. All the
TRTEngineOp references will be deleted and reset to default empty func.
|
github-repos
|
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
if (mapreduce_state is None):
return []
keys = []
for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
keys.append(cls.get_key_by_shard_id(shard_id))
return keys
|
Calculate all shard states keys for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of keys for shard states, sorted by shard id.
The corresponding shard states may not exist.
|
codesearchnet
|
def try_storage(self, identifier, req, resp, resource, uri_kwargs):
if (identifier is None):
user = None
elif (self.user_storage is not None):
user = self.user_storage.get_user(self, identifier, req, resp, resource, uri_kwargs)
elif ((self.user_storage is None) and (not self.only_with_storage)):
user = {'identified_with': self, 'identifier': identifier}
else:
user = None
return user
|
Try to find user in configured user storage object.
Args:
identifier: User identifier.
Returns:
user object.
|
codesearchnet
|
def eval_adiabatic_limit(YABFGN, Ytilde, P0):
(Y, A, B, F, G, N) = YABFGN
Klim = ((P0 * (B - ((A * Ytilde) * A))) * P0).expand().simplify_scalar()
Hlim = (((Klim - Klim.dag()) / 2) / I).expand().simplify_scalar()
Ldlim = ((P0 * (G - ((A * Ytilde) * F))) * P0).expand().simplify_scalar()
dN = (identity_matrix(N.shape[0]) + ((F.H * Ytilde) * F))
Nlim = (((P0 * N) * dN) * P0).expand().simplify_scalar()
return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag())
|
Compute the limiting SLH model for the adiabatic approximation
Args:
YABFGN: The tuple (Y, A, B, F, G, N)
as returned by prepare_adiabatic_limit.
Ytilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.
P0: The projector onto the null-space of Y.
Returns:
SLH: Limiting SLH model
|
codesearchnet
|
def run(self, variables=None, overrides=None):
old_dir = os.getcwd()
try:
os.chdir(self.run_directory)
initialized_steps = self.prepare(variables)
owned_resources = {}
try:
print(('Running in %s' % self.run_directory))
(initialized_resources, owned_resources) = self._prepare_resources(variables, overrides)
for (i, (step, decl)) in enumerate(zip(initialized_steps, self.steps)):
print(('===> Step %d: %s\t Description: %s' % ((i + 1), self.steps[i][0].__name__, self.steps[i][1].get('description', ''))))
(runtime, out) = _run_step(step, decl, initialized_resources)
print(('======> Time Elapsed: %.2f seconds' % runtime))
if (out is not None):
print(out[1])
finally:
self._cleanup_resources(owned_resources)
finally:
os.chdir(old_dir)
|
Initialize and run this recipe.
By default all necessary shared resources are created and destroyed in
this function unless you pass them preinitialized in overrides, in
which case they are used as is. The overrides parameter is designed
to allow testability of iotile-ship recipes by inspecting the shared
resources after the recipe has finished to ensure that it was properly
set up.
Args:
variables (dict): An optional dictionary of variable assignments.
There must be a single assignment for all free variables that
do not have a default value, otherwise the recipe will not
run.
overrides (dict): An optional dictionary of shared resource
objects that should be used instead of creating that resource
and destroying it inside this function.
|
codesearchnet
|
def __init__(self, env, past_indices, flatten):
if 0 not in past_indices:
raise KeyError('Past indices should include 0 for the current frame.')
self._env = env
self._past_indices = past_indices
self._step = 0
self._buffer = None
self._capacity = max(past_indices) + 1
self._flatten = flatten
|
Augment the observation with past observations.
Implemented as a Numpy ring buffer holding the necessary past observations.
Args:
env: OpenAI Gym environment to wrap.
past_indices: List of non-negative integers indicating the time offsets
from the current time step of observations to include.
flatten: Concatenate the past observations rather than stacking them.
Raises:
KeyError: The current observation is not included in the indices.
|
juraj-google-style
|
def observe_reward_value(self, state_key, action_key):
reward_value = 0.0
if state_key in self.__state_action_list_dict:
if action_key in self.__state_action_list_dict[state_key]:
reward_value = 1.0
return reward_value
|
Compute the reward value.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Reward value.
|
juraj-google-style
|
def add_file_handler(logger,level,tags):
f_formatter = logging.Formatter('%(asctime)s:%(name)s:\t%(message)s')
filename = get_logfile_name(tags)
handler = logging.FileHandler(filename=filename,mode="a")
handler.setLevel(level)
handler.setFormatter(f_formatter)
logger.addHandler(handler)
|
Creates and adds a file handler (`logging.FileHandler` instance) to the specified logger.
Args:
logger: The `logging.Logger` instance to add the new file handler to.
level: `str`. The logging level for which the handler accepts messages, i.e. `logging.INFO`.
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
|
juraj-google-style
|
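A short usage sketch, assuming `add_file_handler` and its `get_logfile_name` helper are importable; the tag names are placeholders:
```python
import logging

logger = logging.getLogger("experiment")
logger.setLevel(logging.INFO)

add_file_handler(logger, logging.INFO, ["resnet50", "run3"])
logger.info("this message is appended to the '_'-delimited log file named from the tags")
```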
def from_file(cls, vert, frag, **kwargs):
vert_program = open(vert).read()
frag_program = open(frag).read()
return cls(vert=vert_program, frag=frag_program, **kwargs)
|
Reads the shader programs, given the vert and frag filenames
Arguments:
- vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')
- frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')
Returns:
- shader (Shader): The Shader using these files.
|
juraj-google-style
|
def build_request_relationship(type, ids):
if ids is None:
return {
'data': None
}
elif isinstance(ids, str):
return {
'data': {'id': ids, 'type': type}
}
else:
return {
"data": [{"id": id, "type": type} for id in ids]
}
|
Build a relationship list.
A relationship list is used to update relationships between two
resources. Setting sensors on a label, for example, uses this
function to construct the list of sensor ids to pass to the Helium
API.
Args:
type(string): The resource type for the ids in the relationship
ids([uuid] or uuid): Just one or a list of resource uuids to use
in the relationship
Returns:
A ready to use relationship JSON object.
|
juraj-google-style
|
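Illustrative calls for the helper above; the ids are placeholders, not real resource uuids:
```python
build_request_relationship("sensor", None)
# -> {'data': None}

build_request_relationship("sensor", "id-1")
# -> {'data': {'id': 'id-1', 'type': 'sensor'}}

build_request_relationship("sensor", ["id-1", "id-2"])
# -> {'data': [{'id': 'id-1', 'type': 'sensor'}, {'id': 'id-2', 'type': 'sensor'}]}
```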
def converted_call(f, args, kwargs, caller_fn_scope=None, options=None):
logging.log(1, 'Converted call: %s\n args: %s\n kwargs: %s\n', f, args, kwargs)
if options is None:
if caller_fn_scope is None:
raise ValueError('either caller_fn_scope or options must have a value')
options = caller_fn_scope.callopts
if conversion.is_in_allowlist_cache(f, options):
logging.log(2, 'Allowlisted %s: from cache', f)
return _call_unconverted(f, args, kwargs, options, False)
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
logging.log(2, 'Allowlisted: %s: AutoGraph is disabled in context', f)
return _call_unconverted(f, args, kwargs, options, False)
if is_autograph_artifact(f):
logging.log(2, 'Permanently allowed: %s: AutoGraph artifact', f)
return _call_unconverted(f, args, kwargs, options)
if isinstance(f, functools.partial):
new_kwargs = {}
if f.keywords is not None:
new_kwargs = f.keywords.copy()
if kwargs is not None:
new_kwargs.update(kwargs)
new_args = f.args + args
logging.log(3, 'Forwarding call of partial %s with\n%s\n%s\n', f, new_args, new_kwargs)
return converted_call(f.func, new_args, new_kwargs, caller_fn_scope=caller_fn_scope, options=options)
if inspect_utils.isbuiltin(f):
if f is eval:
return py_builtins.eval_in_original_context(f, args, caller_fn_scope)
if f is super:
return py_builtins.super_in_original_context(f, args, caller_fn_scope)
if f is globals:
return py_builtins.globals_in_original_context(caller_fn_scope)
if f is locals:
return py_builtins.locals_in_original_context(caller_fn_scope)
if kwargs:
return py_builtins.overload_of(f)(*args, **kwargs)
else:
return py_builtins.overload_of(f)(*args)
if conversion.is_unsupported(f):
return _call_unconverted(f, args, kwargs, options)
if not options.user_requested and conversion.is_allowlisted(f):
return _call_unconverted(f, args, kwargs, options)
if not options.internal_convert_user_code:
return _call_unconverted(f, args, kwargs, options)
try:
if inspect.ismethod(f) or inspect.isfunction(f):
target_entity = f
effective_args = args
f_self = getattr(f, '__self__', None)
if f_self is not None:
if isinstance(f_self, tf_method_target.TfMethodTarget):
f_self = f_self.target
effective_args = (f_self,) + effective_args
elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):
target_entity = f.__class__.__call__
effective_args = (f,) + args
else:
target_entity = f
raise NotImplementedError('unknown callable type "%s"' % type(f))
except Exception as e:
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
return _fall_back_unconverted(f, args, kwargs, options, e)
if not hasattr(target_entity, '__code__'):
logging.log(2, 'Permanently allowed: %s: native binding', target_entity)
return _call_unconverted(f, args, kwargs, options)
elif hasattr(target_entity.__code__, 'co_filename') and target_entity.__code__.co_filename == '<string>':
logging.log(2, 'Permanently allowed: %s: dynamic code (exec?)', target_entity)
return _call_unconverted(f, args, kwargs, options)
try:
program_ctx = converter.ProgramContext(options=options)
converted_f = _convert_actual(target_entity, program_ctx)
if logging.has_verbosity(2):
_log_callargs(converted_f, effective_args, kwargs)
except Exception as e:
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
return _fall_back_unconverted(f, args, kwargs, options, e)
with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
try:
if kwargs is not None:
result = converted_f(*effective_args, **kwargs)
else:
result = converted_f(*effective_args)
except Exception as e:
_attach_error_metadata(e, converted_f)
raise
return result
|
Converts a function call inline.
For internal use only.
Note: The argument list is optimized for readability of generated code, which
may look like this:
ag__.converted_call(f, (arg1, arg2), None, fscope)
ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)
ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)
Args:
f: The function to convert.
args: Tuple, the original positional arguments of f
kwargs: Optional[Dict], the original keyword arguments of f
caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
scope of the converted function in which this call was originally made.
options: Optional[converter.ConversionOptions], conversion options. If not
specified, the value of caller_fn_scope.callopts is used. Either options
or caller_fn_scope must be present.
Returns:
Any, the result of executing a possibly-converted `f` with the given
arguments.
|
github-repos
|
def upper_diag_self_prodx(list_):
return [(item1, item2) for (n1, item1) in enumerate(list_) for (n2, item2) in enumerate(list_) if (n1 < n2)]
|
upper diagonal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
|
codesearchnet
|
def get_marginal_distribution(self, index_points=None):
with self._name_scope('get_marginal_distribution'):
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
if self._is_univariate_marginal(index_points):
scale = tf.sqrt(covariance)
loc = tf.squeeze(loc, axis=(- 1))
return normal.Normal(loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution')
else:
scale = tf.linalg.LinearOperatorLowerTriangular(tf.linalg.cholesky(_add_diagonal_shift(covariance, self.jitter)), is_non_singular=True, name='GaussianProcessScaleLinearOperator')
return mvn_linear_operator.MultivariateNormalLinearOperator(loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution')
|
Compute the marginal of this GP over function values at `index_points`.
Args:
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the GP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to a `e`-dimensional multivariate normal. The batch shape
must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
Returns:
marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,
according to whether `index_points` consists of one or many index
points, respectively.
|
codesearchnet
|
def downstream(self, node):
graph = self.graph
if (node not in graph):
raise KeyError(('node %s is not in graph' % node))
return list(graph[node])
|
Returns a list of all nodes this node has edges towards.
Args:
node (str): The node whose downstream nodes you want to find.
Returns:
list: A list of nodes that are immediately downstream from the
node.
|
codesearchnet
|
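A minimal sketch of the method above; `DAG` is a hypothetical stand-in for the owning graph class, whose `graph` attribute maps each node to the set of its direct successors:
```python
dag = DAG()                                            # hypothetical graph instance
dag.graph = {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}

sorted(dag.downstream('a'))   # -> ['b', 'c']
dag.downstream('z')           # raises KeyError: node z is not in graph
```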
def __find_variant(self, value):
if isinstance(value, bool):
return messages.Variant.BOOL
elif isinstance(value, six.integer_types):
return messages.Variant.INT64
elif isinstance(value, float):
return messages.Variant.DOUBLE
elif isinstance(value, six.string_types):
return messages.Variant.STRING
elif isinstance(value, (list, tuple)):
variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING]
chosen_priority = 0
for v in value:
variant = self.__find_variant(v)
try:
priority = variant_priority.index(variant)
except ValueError:
priority = (- 1)
if (priority > chosen_priority):
chosen_priority = priority
return variant_priority[chosen_priority]
return None
|
Find the messages.Variant type that describes this value.
Args:
value: The value whose variant type is being determined.
Returns:
The messages.Variant value that best describes value's type,
or None if it's a type we don't know how to handle.
|
codesearchnet
|
def add_common_arguments(self, parser, has_device=False):
if has_device:
parser.add_argument('-t', '--tif', required=True,
type=str.lower, choices=['jtag', 'swd'],
help='target interface (JTAG | SWD)')
parser.add_argument('-d', '--device', required=True,
help='specify the target device name')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-s', '--serial', dest='serial_no',
help='specify the J-Link serial number')
group.add_argument('-i', '--ip_addr', dest='ip_addr',
help='J-Link IP address')
return None
|
Adds common arguments to the given parser.
Common arguments for a J-Link command are the target interface, and
J-Link serial number or IP address.
Args:
self (Command): the ``Command`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
has_device (bool): boolean indicating if it has the device argument
Returns:
``None``
|
juraj-google-style
|
def normalize(self, image: 'torch.Tensor', mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], **kwargs) -> 'torch.Tensor':
return F.normalize(image, mean, std)
|
Normalize an image. image = (image - image_mean) / image_std.
Args:
image (`torch.Tensor`):
Image to normalize.
mean (`torch.Tensor`, `float` or `Iterable[float]`):
Image mean to use for normalization.
std (`torch.Tensor`, `float` or `Iterable[float]`):
Image standard deviation to use for normalization.
Returns:
`torch.Tensor`: The normalized image.
|
github-repos
|
def StreamMemory(self, process, offset=0, amount=None):
reader = MemoryReader(process, offset=offset)
return self.Stream(reader, amount=amount)
|
Streams chunks of memory of a given process starting at given offset.
Args:
process: A platform-specific `Process` instance.
offset: An integer offset at which the memory stream should start on.
amount: An upper bound on number of bytes to read.
Returns:
Generator over `Chunk` instances.
|
codesearchnet
|
def _evolve(self, state, qargs=None):
state = self._format_state(state)
if qargs is None:
if state.shape[0] != self._input_dim:
raise QiskitError(
"Operator input dimension is not equal to state dimension."
)
if state.ndim == 1:
return np.dot(self.data, state)
return np.dot(
np.dot(self.data, state), np.transpose(np.conj(self.data)))
return self._evolve_subsystem(state, qargs)
|
Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
|
juraj-google-style
|
def _parse_bro_header(self, logfile):
_line = next(logfile)
while (not _line.startswith('#fields')):
_line = next(logfile)
_field_names = _line.strip().split(self.delimiter)[1:]
_line = next(logfile)
_field_types = _line.strip().split(self.delimiter)[1:]
return (_field_names, _field_types)
|
This method tries to parse the Bro log header section.
Note: My googling is failing me on the documentation on the format,
so just making a lot of assumptions and skipping some shit.
Assumption 1: The delimiter is a tab.
Assumption 2: Types are either time, string, int or float
Assumption 3: The header always ends with #fields and #types as
the last two lines.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string
Args:
logfile: The Bro log file.
Returns:
A tuple of 2 lists. One for field names and other for field types.
|
codesearchnet
|
def stat_float_times(cls, newvalue=None):
if (newvalue is not None):
cls._stat_float_times = bool(newvalue)
return cls._stat_float_times
|
Determine whether a file's time stamps are reported as floats
or ints.
Calling without arguments returns the current value.
The value is shared by all instances of FakeOsModule.
Args:
newvalue: If `True`, mtime, ctime, atime are reported as floats.
Otherwise, they are returned as ints (rounding down).
|
codesearchnet
|
def get_class_that_defined_method(fun):
if inspect.ismethod(fun):
for cls in inspect.getmro(fun.__self__.__class__):
if cls.__dict__.get(fun.__name__) is fun:
return cls
fun = fun.__func__
if inspect.isfunction(fun):
cls = getattr(inspect.getmodule(fun),
fun.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0], None)
if isinstance(cls, type):
return cls
return getattr(fun, '__objclass__', None)
|
Tries to find the class that defined the specified method. Will not work for nested classes
(locals).
Args:
fun: Function / Method
Returns:
Returns the class which defines the given method / function.
|
juraj-google-style
|
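A quick check of the resolution logic above, assuming `get_class_that_defined_method` is importable and the classes are defined at module level (nested/local classes are not supported, as noted):
```python
class Base:
    def greet(self):
        return "hi"

class Child(Base):
    pass

get_class_that_defined_method(Child().greet)   # -> <class 'Base'>  (bound method)
get_class_that_defined_method(Base.greet)      # -> <class 'Base'>  (plain function)
```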
def write_eval_records(bt_table, game_data, last_game):
eval_num = last_game
GAMES_PER_COMMIT = 2000
for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists"
assert (bt_table.read_row(EVAL_PREFIX.format((eval_num + 1))) is None), 'Row already exists'
rows = []
for (i, metadata) in enumerate(games):
eval_num += 1
row_name = EVAL_PREFIX.format(eval_num)
row = bt_table.row(row_name)
for (column, value) in metadata:
row.set_cell(METADATA, column, value)
rows.append(row)
if ((i < 5) or ((i + 5) > len(games))):
print('\t', i, row_name, metadata[6][1])
if (eval_num == (last_game + len(games))):
test = input("Commit ('y'/'yes' required): ")
if (test.lower() not in ('y', 'yes')):
break
game_num_update = bt_table.row(TABLE_STATE)
game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
print(TABLE_STATE, eval_num)
response = bt_table.mutate_rows(rows)
any_bad = False
for (i, status) in enumerate(response):
if (status.code != 0):
print('Row number {} failed to write {}'.format(i, status))
any_bad = True
if any_bad:
break
game_num_update.commit()
|
Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
|
codesearchnet
|
def remove_item(name, system_wide=False):
desktop_env = system.get_name()
if desktop_env == 'windows':
import winreg
if system_wide:
startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
else:
startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
for startup_file in os.listdir(startup_dir):
if startup_file == name or startup_file.split('.')[0] == name:
os.remove(os.path.join(startup_dir, startup_file))
elif desktop_env == 'mac':
sp.Popen(['launchctl', 'remove', name])
else:
if desktop_env == 'unknown':
if system_wide:
login_file = '/etc/profile'
else:
login_file = os.path.expanduser('~/.profile')
with open(login_file) as f:
login_file_contents = f.read()
final_login_file_contents = ''
for line in login_file_contents.split('\n'):
if line.split(' ')[0] != name:
final_login_file_contents += line
with open(login_file, 'w') as f:
f.write(final_login_file_contents)
else:
try:
desktop_file_name = name + '.desktop'
startup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)
if not os.path.isfile(startup_file):
for possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]):
possible_startup_file_parsed = desktopfile.parse(possible_startup_file)
if possible_startup_file_parsed['Name'] == name:
startup_file = possible_startup_file
os.remove(startup_file)
except IndexError:
pass
|
Removes a program from startup.
Args:
name (str): The name of the program (as known to the system) to remove. See :func:`list_items`.
system_wide (bool): Remove it from system-wide startup.
Note:
``system_wide`` requires superuser/admin privileges.
|
juraj-google-style
|
def emit_code_from_ir(sql_query_tree, compiler_metadata):
context = CompilationContext(query_path_to_selectable=dict(), query_path_to_location_info=sql_query_tree.query_path_to_location_info, query_path_to_output_fields=sql_query_tree.query_path_to_output_fields, query_path_to_filters=sql_query_tree.query_path_to_filters, query_path_to_node=sql_query_tree.query_path_to_node, compiler_metadata=compiler_metadata)
return _query_tree_to_query(sql_query_tree.root, context)
|
Return a SQLAlchemy Query from a passed SqlQueryTree.
Args:
sql_query_tree: SqlQueryTree, tree representation of the query to emit.
compiler_metadata: SqlMetadata, SQLAlchemy specific metadata.
Returns:
SQLAlchemy Query
|
codesearchnet
|
def _read_tags(self):
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True
|
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
|
juraj-google-style
|
def run_console(self, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE, **kwargs):
kwargs.setdefault('prompt', 'consent')
self.redirect_uri = self._OOB_REDIRECT_URI
(auth_url, _) = self.authorization_url(**kwargs)
print(authorization_prompt_message.format(url=auth_url))
code = input(authorization_code_message)
self.fetch_token(code=code)
return self.credentials
|
Run the flow using the console strategy.
The console strategy instructs the user to open the authorization URL
in their browser. Once the authorization is complete the authorization
server will give the user a code. The user then must copy & paste this
code into the application. The code is then exchanged for a token.
Args:
authorization_prompt_message (str): The message to display to tell
the user to navigate to the authorization URL.
authorization_code_message (str): The message to display when
prompting the user for the authorization code.
kwargs: Additional keyword arguments passed through to
:meth:`authorization_url`.
Returns:
google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
for the user.
|
codesearchnet
|
def get_missing_simulations(self, param_list, runs=None):
params_to_simulate = []
if (runs is not None):
next_runs = self.db.get_next_rngruns()
available_params = [r['params'] for r in self.db.get_results()]
for param_comb in param_list:
needed_runs = runs
for (i, p) in enumerate(available_params):
if (param_comb == {k: p[k] for k in p.keys() if (k != 'RngRun')}):
needed_runs -= 1
new_param_combs = []
for needed_run in range(needed_runs):
new_param = deepcopy(param_comb)
new_param['RngRun'] = next(next_runs)
new_param_combs += [new_param]
params_to_simulate += new_param_combs
else:
for param_comb in param_list:
if (not self.db.get_results(param_comb)):
params_to_simulate += [param_comb]
return params_to_simulate
|
Return a list of the simulations among the required ones that are not
available in the database.
Args:
param_list (list): a list of dictionaries containing all the
parameters combinations.
runs (int): an integer representing how many repetitions are wanted
for each parameter combination, None if the dictionaries in
param_list already feature the desired RngRun value.
|
codesearchnet
|
def add_variable_from_reference(self, reference_variable, name=None, initializer='zeros'):
name = name or 'var'
if hasattr(reference_variable, 'path'):
name = reference_variable.path.replace('/', '_') + '_' + name
else:
name = str(reference_variable.name).replace('/', '_').replace(':', '_') + '_' + name
return self.add_variable(shape=reference_variable.shape, initializer=initializer, dtype=reference_variable.dtype, name=name, layout=getattr(reference_variable, '_layout', None))
|
Add an optimizer variable from the model variable.
Create an optimizer variable based on the information of model variable.
For example, in SGD optimizer momentum, for each model variable, a
corresponding momentum variable is created of the same shape and dtype.
Args:
reference_variable: `keras.Variable`. The corresponding model
variable to the optimizer variable to be created.
name: Optional string. The name prefix of the optimizer variable to
be created. If not provided, it will be set to `"var"`. The
variable name will follow the pattern
`{variable_name}_{reference_variable.name}`,
e.g., `momentum/dense_1`. Defaults to `None`.
initializer: Initializer object to use to populate the initial
variable value, or string name of a built-in initializer
(e.g. `"random_normal"`). If unspecified, defaults to
`"zeros"`.
Returns:
An optimizer variable, in the format of `keras.Variable`.
|
github-repos
|
def compute_distance(a, b):
if not a:
return len(b)
if not b:
return len(a)
if a == b or str.lower(a) == str.lower(b):
return 0
a = str.lower(a)
b = str.lower(b)
vector_1 = [-1] * (len(b) + 1)
vector_2 = [-1] * (len(b) + 1)
for i in range(len(vector_1)):
vector_1[i] = i
for i in range(len(a)):
vector_2[0] = i + 1
for j in range(len(b)):
penalty = 0 if a[i] == b[j] else compute_qwerty_distance(a[i], b[j])
vector_2[j + 1] = min(vector_2[j] + 1, vector_1[j + 1] + 1, vector_1[j] + penalty)
for j in range(len(vector_1)):
vector_1[j] = vector_2[j]
return vector_2[len(b)]
|
Computes a modified Levenshtein distance between two strings, comparing the
lowercase versions of each string and accounting for QWERTY distance.
Arguments:
- a (str) String to compare to 'b'
- b (str) String to compare to 'a'
Returns:
- (int) Number representing closeness of 'a' and 'b' (lower is better)
|
juraj-google-style
|
def delete_additional_charge(self, recurring_billing_id):
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._delete((self.url + fmt), headers=self.get_headers())
|
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
|
codesearchnet
|
def aggregate_variables(agg_funcs: t.List[t.Dict[str, str]], ds: xr.Dataset, time_fields: t.List[str], coords_to_squeeze: t.List[str]) -> xr.Dataset:
agg_dataset = xr.Dataset(coords=ds.coords, attrs=ds.attrs)
if len(time_fields):
agg_dataset = agg_dataset.groupby(ds['time'].dt.strftime(timestamp_formats[time_fields[0]]))
agg_dataset = apply_aggregation(agg_dataset, 'avg', None)
agg_dataset = agg_dataset.rename({'strftime': time_fields[0]})
agg_dataset = apply_aggregation(agg_dataset, 'avg', coords_to_squeeze)
for agg_func in agg_funcs:
variable, function = (agg_func['var'], agg_func['func'])
grouped_ds = ds[variable]
dims = [value for value in coords_to_squeeze if value in ds[variable].coords] if coords_to_squeeze else None
if len(time_fields):
groups = grouped_ds.groupby(ds['time'].dt.strftime(timestamp_formats[time_fields[0]]))
grouped_ds = apply_aggregation(groups, function, None)
grouped_ds = grouped_ds.rename({'strftime': time_fields[0]})
agg_dim_ds = apply_aggregation(grouped_ds, function, dims)
agg_dataset = agg_dataset.assign({f'{function}_{variable}': agg_dim_ds})
return agg_dataset
|
Aggregate variables in an xarray dataset based on aggregation functions.
Args:
agg_funcs (List[Dict[str, str]]): List of dictionaries specifying aggregation functions for variables.
ds (xr.Dataset): The input xarray dataset.
time_fields (List[str]): List of time fields to consider for time-based grouping.
coords_to_squeeze (List[str]): List of coordinates to be squeezed during aggregation.
Returns:
xr.Dataset: The aggregated xarray dataset.
|
github-repos
|
def loads(s, single=False, version=_default_version, strict=False, errors='warn'):
ms = deserialize(s, version=version, strict=strict, errors=errors)
if single:
return next(ms)
else:
return ms
|
Deserialize SimpleMRS string representations
Args:
s (str): a SimpleMRS string
single (bool): if `True`, only return the first Xmrs object
Returns:
a generator of Xmrs objects (unless *single* is `True`)
|
codesearchnet
|
def path(self, value):
if not value.endswith('/'):
self._path = '{v}/'.format(v=value)
else:
self._path = value
|
Setter for 'path' property
Args:
value (str): Absolute path to scan
|
juraj-google-style
|
def get_schema_node(self, path: SchemaPath) -> Optional[SchemaNode]:
return self.schema.get_schema_descendant(
self.schema_data.path2route(path))
|
Return the schema node addressed by a schema path.
Args:
path: Schema path.
Returns:
Schema node if found in the schema, or ``None``.
Raises:
InvalidSchemaPath: If the schema path is invalid.
|
juraj-google-style
|
def get_keyvault(access_token, subscription_id, rgname, vault_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_get(endpoint, access_token)
|
Gets details about the named key vault.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the key vault.
Returns:
HTTP response. JSON body of key vault properties.
|
juraj-google-style
|
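A hypothetical call for the wrapper above; the subscription id, resource group, and vault name are placeholders, and `access_token` is assumed to come from this library's authentication helpers:
```python
response = get_keyvault(access_token,
                        '00000000-0000-0000-0000-000000000000',
                        'my-resource-group',
                        'my-vault')
# Assuming do_get() returns a requests-style Response, the vault properties
# are in the JSON body:
print(response.json()['properties']['vaultUri'])
```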
def chunk_constant_value(node: node_def_pb2.NodeDef, size: int):
if node.op == _CONST_OP:
tensor_proto = node.attr['value'].tensor
if tensor_proto.tensor_content:
b = tensor_proto.tensor_content
else:
b = tensor_util.MakeNdarray(tensor_proto).tobytes()
kept_attributes = {key: getattr(tensor_proto, key) for key in _KEEP_TENSOR_PROTO_FIELDS}
tensor_proto.Clear()
for field, val in kept_attributes.items():
if isinstance(val, message.Message):
getattr(tensor_proto, field).MergeFrom(val)
else:
setattr(tensor_proto, field, val)
return b
else:
attributes_and_sizes = ', '.join([f'{key}: {util.format_bytes(val.ByteSize())}' for key, val in node.attr.items()])
raise ValueError(f'Unable to split GraphDef because at least one of the nodes individually exceeds the max size of {util.format_bytes(constants.max_size())}. Currently only Const nodes can be further split.\nNode info:\n\tsize: {util.format_bytes(size)}\n\tname: {node.name}\n\top: {node.op}\n\tinputs: {node.input}\n\top: {node.op}\n\tdevice: {node.device}\n\tattr (and sizes): {attributes_and_sizes}')
|
Extracts and clears the constant value from a NodeDef.
Args:
node: NodeDef with const value to extract.
size: Size of NodeDef (for error reporting).
Returns:
Bytes representation of the Constant tensor content.
|
github-repos
|
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = BytearrayStream()
if self._nonce_id:
self._nonce_id.write(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Nonce struct is missing the nonce ID.')
if self._nonce_value:
self._nonce_value.write(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Nonce struct is missing the nonce value.')
self.length = local_stream.length()
super(Nonce, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer)
|
Write the data encoding the Nonce struct to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the nonce ID or nonce value is not defined.
|
codesearchnet
|
def __type_matches(self, obj: Any, type_: Type) -> bool:
if is_generic_union(type_):
for t in generic_type_args(type_):
if self.__type_matches(obj, t):
return True
return False
elif is_generic_list(type_):
if not isinstance(obj, list):
return False
for item in obj:
if not self.__type_matches(item, generic_type_args(type_)[0]):
return False
return True
elif is_generic_dict(type_):
if not isinstance(obj, OrderedDict):
return False
for key, value in obj.items():
if not isinstance(key, generic_type_args(type_)[0]):
return False
if not self.__type_matches(value, generic_type_args(type_)[1]):
return False
return True
else:
return isinstance(obj, type_)
|
Checks that the object matches the given type.
Like isinstance(), but will work with union types using Union,
Dict and List.
Args:
obj: The object to check
type_: The type to check against
Returns:
True iff obj is of type type_
|
juraj-google-style
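The union/list/dict matching above can be illustrated with a standalone sketch built on the standard `typing` helpers; this is an analogue of the technique for plain dicts, not the library's own implementation.
from typing import Any, Dict, List, Union, get_args, get_origin

def type_matches(obj: Any, type_: Any) -> bool:
    origin = get_origin(type_)
    if origin is Union:                      # try every alternative of the union
        return any(type_matches(obj, t) for t in get_args(type_))
    if origin is list:                       # check every element against the item type
        return isinstance(obj, list) and all(
            type_matches(item, get_args(type_)[0]) for item in obj)
    if origin is dict:                       # check every key and value
        key_t, val_t = get_args(type_)
        return isinstance(obj, dict) and all(
            isinstance(k, key_t) and type_matches(v, val_t)
            for k, v in obj.items())
    return isinstance(type_, type) and isinstance(obj, type_)

print(type_matches([1, 2, 3], List[int]))                   # True
print(type_matches({'a': 1}, Union[int, Dict[str, int]]))   # True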
|
def conjugate(x):
if any_symbolic_tensors((x,)):
return Conjugate().symbolic_call(x)
return backend.numpy.conjugate(x)
|
Returns the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the sign
of its imaginary part.
`keras.ops.conj` is a shorthand for this function.
Args:
x: Input tensor.
Returns:
The complex conjugate of each element in `x`.
|
github-repos
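A minimal usage sketch for `conjugate` above, assuming the Keras 3 `keras.ops` namespace is available.
import numpy as np
from keras import ops  # Keras 3 assumed
x = np.array([1 + 2j, 3 - 4j])
print(ops.conjugate(x))  # the conjugate of each element: 1-2j and 3+4j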
|
def WriteFileEntry(self, path):
string = '{0:s}\n'.format(path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string)
|
Writes the file path to file.
Args:
path (str): path of the file.
|
juraj-google-style
|
def _sort_dump_data_by(self, data, sort_by, reverse):
if sort_by == SORT_TENSORS_BY_TIMESTAMP:
return sorted(data, reverse=reverse, key=lambda x: x.timestamp)
elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:
return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)
elif sort_by == SORT_TENSORS_BY_OP_TYPE:
return sorted(data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name))
elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:
return sorted(data, reverse=reverse, key=lambda x: '%s:%d' % (x.node_name, x.output_slot))
else:
raise ValueError('Unsupported key to sort tensors by: %s' % sort_by)
|
Sort a list of DebugTensorDatum in specified order.
Args:
data: (list of DebugTensorDatum) the data to be sorted.
sort_by: The field to sort data by.
reverse: (bool) Whether to use reversed (descending) order.
Returns:
(list of DebugTensorDatum) in sorted order.
Raises:
ValueError: given an invalid value of sort_by.
|
github-repos
|
def speed_info(self):
speed_info = structs.JLinkSpeedInfo()
self._dll.JLINKARM_GetSpeedInfo(ctypes.byref(speed_info))
return speed_info
|
Retrieves information about supported target interface speeds.
Args:
self (JLink): the ``JLink`` instance
Returns:
The ``JLinkSpeedInfo`` instance describing the supported target
interface speeds.
|
codesearchnet
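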
|
def build_transcript(transcript, build='37'):
transcript_id = transcript['transcript_id']
transcript_obj = dict(
transcript_id = transcript_id
)
transcript_obj['hgnc_id'] = transcript['hgnc_id']
if transcript.get('protein_id'):
transcript_obj['protein_id'] = transcript['protein_id']
if transcript.get('sift_prediction'):
transcript_obj['sift_prediction'] = transcript['sift_prediction']
if transcript.get('polyphen_prediction'):
transcript_obj['polyphen_prediction'] = transcript['polyphen_prediction']
if transcript.get('swiss_prot'):
transcript_obj['swiss_prot'] = transcript['swiss_prot']
if transcript.get('pfam_domain'):
transcript_obj['pfam_domain'] = transcript.get('pfam_domain')
if transcript.get('prosite_profile'):
transcript_obj['prosite_profile'] = transcript.get('prosite_profile')
if transcript.get('smart_domain'):
transcript_obj['smart_domain'] = transcript.get('smart_domain')
if transcript.get('biotype'):
transcript_obj['biotype'] = transcript.get('biotype')
if transcript.get('functional_annotations'):
transcript_obj['functional_annotations'] = transcript['functional_annotations']
if transcript.get('region_annotations'):
transcript_obj['region_annotations'] = transcript['region_annotations']
if transcript.get('exon'):
transcript_obj['exon'] = transcript.get('exon')
if transcript.get('intron'):
transcript_obj['intron'] = transcript.get('intron')
if transcript.get('strand'):
transcript_obj['strand'] = transcript.get('strand')
if transcript.get('coding_sequence_name'):
transcript_obj['coding_sequence_name'] = transcript['coding_sequence_name']
if transcript.get('protein_sequence_name'):
transcript_obj['protein_sequence_name'] = transcript['protein_sequence_name']
transcript_obj['is_canonical'] = transcript.get('is_canonical', False)
return transcript_obj
|
Build a transcript object
These represents the transcripts that are parsed from the VCF, not
the transcript definitions that are collected from ensembl.
Args:
transcript(dict): Parsed transcript information
Returns:
transcript_obj(dict)
|
juraj-google-style
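A worked example for `build_transcript` above; the parsed-transcript dict is a made-up minimal input, not real VEP/VCF output.
parsed = {
    'transcript_id': 'ENST00000000001',   # placeholder identifiers
    'hgnc_id': 1234,
    'protein_id': 'ENSP00000000001',
    'biotype': 'protein_coding',
    'is_canonical': True,
}
transcript_obj = build_transcript(parsed)
# expected: {'transcript_id': 'ENST00000000001', 'hgnc_id': 1234,
#            'protein_id': 'ENSP00000000001', 'biotype': 'protein_coding',
#            'is_canonical': True}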
|
def dinf_downslope_direction(a):
taud, d = DinfUtil.check_orthogonal(a)
if d != -1:
down = [d]
return down
else:
if a < FlowModelConst.ne:
down = [1, 2]
elif a < FlowModelConst.n:
down = [2, 3]
elif a < FlowModelConst.nw:
down = [3, 4]
elif a < FlowModelConst.w:
down = [4, 5]
elif a < FlowModelConst.sw:
down = [5, 6]
elif a < FlowModelConst.s:
down = [6, 7]
elif a < FlowModelConst.se:
down = [7, 8]
else:
down = [8, 1]
return down
|
Get the downslope directions of a Dinf direction value
Args:
a: Dinf value
Returns:
downslope directions
|
juraj-google-style
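A small worked example for `dinf_downslope_direction` above, under the assumption that Dinf angles are radians measured counter-clockwise from east (TauDEM convention), so `FlowModelConst.ne` is pi/4 and non-cardinal angles make `check_orthogonal` return -1.
import math
angle = math.pi / 8                      # halfway between east and northeast
print(dinf_downslope_direction(angle))   # expected: [1, 2]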
|
def information_matrix(qhbm: inference.QHBM, modular_hamiltonian: models.Hamiltonian, modular_hamiltonian_copy: models.Hamiltonian, config):
def ebm_block():
samples = qhbm.e_inference.sample(config.training.num_samples)
with tf.GradientTape() as tape:
tape.watch(modular_hamiltonian.energy.trainable_variables[0])
energies = modular_hamiltonian.energy(samples)
energy_jac = tape.jacobian(energies, modular_hamiltonian.energy.trainable_variables[0])
avg_energy_grad = tf.reduce_mean(energy_jac, axis=0)
centered_energy_jac = energy_jac - avg_energy_grad
return tf.matmul(centered_energy_jac, centered_energy_jac, transpose_a=True) / config.training.num_samples
def cross_block():
shift = tf.constant(0.5)
scale = tf.constant(np.pi / 2)
circuit_values = tf.identity(modular_hamiltonian.circuit.trainable_variables[0])
def grad(indices, updates):
modular_hamiltonian.circuit.trainable_variables[0].assign(tf.tensor_scatter_nd_add(circuit_values, indices=indices, updates=updates))
with tf.GradientTape() as tape:
tape.watch(modular_hamiltonian_copy.energy.trainable_variables[0])
expectation = qhbm.expectation(modular_hamiltonian_copy)
return tape.gradient(expectation, modular_hamiltonian_copy.energy.trainable_variables[0])
def row(i):
return scale * (grad([[i]], [-shift]) - grad([[i]], [shift]))
indices = tf.range(tf.shape(modular_hamiltonian.circuit.trainable_variables[0])[0])
block = tf.map_fn(fn=row, elems=indices, fn_output_signature=tf.float32)
modular_hamiltonian.circuit.trainable_variables[0].assign(circuit_values)
return block
def qnn_block():
shift = tf.constant(0.5)
scale = tf.constant(np.pi / 2)
circuit_values = tf.identity(modular_hamiltonian.circuit.trainable_variables[0])
def grad(indices, updates):
modular_hamiltonian.circuit.trainable_variables[0].assign(tf.tensor_scatter_nd_add(circuit_values, indices=indices, updates=updates))
with tf.GradientTape() as tape:
tape.watch(modular_hamiltonian_copy.circuit.trainable_variables[0])
expectation = qhbm.expectation(modular_hamiltonian_copy)
return tape.jacobian(expectation, modular_hamiltonian_copy.circuit.trainable_variables[0])
def row(i):
return scale * (grad([[i]], [-shift]) - grad([[i]], [shift]))
indices = tf.range(tf.shape(modular_hamiltonian.circuit.trainable_variables[0])[0])
block = tf.map_fn(fn=row, elems=indices, fn_output_signature=tf.float32)
modular_hamiltonian.circuit.trainable_variables[0].assign(circuit_values)
return block
block_ebm = ebm_block()
block_cross = tf.squeeze(cross_block())
block_qnn = tf.squeeze(qnn_block())
block_upper = tf.concat([block_ebm, tf.transpose(block_cross)], 1)
block_lower = tf.concat([block_cross, block_qnn], 1)
im = tf.concat([block_upper, block_lower], 0)
return (im + tf.transpose(im)) / 2.0
|
Estimates the Bogoliubov-Kubo-Mori information matrix.
Args:
qhbm: Hamiltonian inference.
modular_hamiltonian: qhbm model. exp(-modular_hamiltonian)/Z(modular_hamiltonian) = rho.
modular_hamiltonian_copy: copy of modular_hamiltonian.
config: config dict.
Returns:
The BKM information matrix. Element-wise this is
tr[d_j rho d_k modular_hamiltonian], i.e. the Hilbert-Schmidt inner
product of a mixture-coordinates tangent vector and an
exponential-coordinates tangent vector.
|
github-repos
|
def __init__(self, graph, run_metadata):
self._graph = graph
if not run_metadata:
raise ValueError('No RunMetadata passed for profile analysis.')
self._run_metadata = run_metadata
self._arg_parsers = {}
ap = argparse.ArgumentParser(description='List nodes profile information.', usage=argparse.SUPPRESS)
ap.add_argument('-d', '--%s' % _DEVICE_NAME_FILTER_FLAG, dest=_DEVICE_NAME_FILTER_FLAG, type=str, default='', help='filter device name by regex.')
ap.add_argument('-n', '--%s' % _NODE_NAME_FILTER_FLAG, dest=_NODE_NAME_FILTER_FLAG, type=str, default='', help='filter node name by regex.')
ap.add_argument('-t', '--%s' % _OP_TYPE_FILTER_FLAG, dest=_OP_TYPE_FILTER_FLAG, type=str, default='', help='filter op type by regex.')
ap.add_argument('-f', '--file_path_filter', dest='file_path_filter', type=str, default='', help="filter by file name at the top position of node's creation stack that does not belong to TensorFlow library.")
ap.add_argument('--min_lineno', dest='min_lineno', type=int, default=-1, help='(Inclusive) lower bound for 1-based line number in source file. If <= 0, has no effect.')
ap.add_argument('--max_lineno', dest='max_lineno', type=int, default=-1, help='(Exclusive) upper bound for 1-based line number in source file. If <= 0, has no effect.')
ap.add_argument('-e', '--execution_time', dest='execution_time', type=str, default='', help='Filter by execution time interval (includes compute plus pre- and post -processing time). Supported units are s, ms and us (default). E.g. -e >100s, -e <100, -e [100us,1000ms]')
ap.add_argument('-o', '--op_time', dest='op_time', type=str, default='', help='Filter by op time interval (only includes compute time). Supported units are s, ms and us (default). E.g. -o >100s, -o <100, -o [100us,1000ms]')
ap.add_argument('-s', '--sort_by', dest='sort_by', type=str, default=SORT_OPS_BY_START_TIME, help='the field to sort the data by: (%s)' % ' | '.join([SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]))
ap.add_argument('-r', '--reverse', dest='reverse', action='store_true', help='sort the data in reverse (descending) order')
ap.add_argument('--time_unit', dest='time_unit', type=str, default=cli_shared.TIME_UNIT_US, help='Time unit (' + ' | '.join(cli_shared.TIME_UNITS) + ')')
self._arg_parsers['list_profile'] = ap
ap = argparse.ArgumentParser(description='Print a Python source file with line-level profile information', usage=argparse.SUPPRESS)
ap.add_argument('source_file_path', type=str, help='Path to the source_file_path')
ap.add_argument('--cost_type', type=str, choices=['exec_time', 'op_time'], default='exec_time', help='Type of cost to display')
ap.add_argument('--time_unit', dest='time_unit', type=str, default=cli_shared.TIME_UNIT_US, help='Time unit (' + ' | '.join(cli_shared.TIME_UNITS) + ')')
ap.add_argument('-d', '--%s' % _DEVICE_NAME_FILTER_FLAG, dest=_DEVICE_NAME_FILTER_FLAG, type=str, default='', help='Filter device name by regex.')
ap.add_argument('-n', '--%s' % _NODE_NAME_FILTER_FLAG, dest=_NODE_NAME_FILTER_FLAG, type=str, default='', help='Filter node name by regex.')
ap.add_argument('-t', '--%s' % _OP_TYPE_FILTER_FLAG, dest=_OP_TYPE_FILTER_FLAG, type=str, default='', help='Filter op type by regex.')
ap.add_argument('--init_line', dest='init_line', type=int, default=0, help='The 1-based line number to scroll to initially.')
self._arg_parsers['print_source'] = ap
|
ProfileAnalyzer constructor.
Args:
graph: (tf.Graph) Python graph object.
run_metadata: A `RunMetadata` protobuf object.
Raises:
ValueError: If run_metadata is None.
|
github-repos
|
def __init__(self,
host=None,
port=None,
user=None,
password=None,
database=None):
warnings.filterwarnings("error", category=MySQLdb.Warning)
for message in [
".*Duplicate entry.*",
".*Table '.*' already exists",
".*Duplicate key name.*",
".*Invalid.*character string.*",
]:
warnings.filterwarnings(
"ignore", category=MySQLdb.Warning, message=message)
self._connect_args = dict(
host=host or config.CONFIG["Mysql.host"],
port=port or config.CONFIG["Mysql.port"],
user=user or config.CONFIG["Mysql.username"],
password=password or config.CONFIG["Mysql.password"],
database=database or config.CONFIG["Mysql.database"])
client_key_path = config.CONFIG["Mysql.client_key_path"]
if client_key_path:
logging.debug("Client key file configured, trying to use SSL.")
self._connect_args["client_key_path"] = client_key_path
self._connect_args["client_cert_path"] = config.CONFIG[
"Mysql.client_cert_path"]
self._connect_args["ca_cert_path"] = config.CONFIG["Mysql.ca_cert_path"]
_SetupDatabase(**self._connect_args)
max_pool_size = config.CONFIG.Get("Mysql.conn_pool_max", 10)
self.pool = mysql_pool.Pool(self._Connect, max_size=max_pool_size)
self.handler_thread = None
self.handler_stop = True
self.flow_processing_request_handler_thread = None
self.flow_processing_request_handler_stop = None
self.flow_processing_request_handler_pool = (
threadpool.ThreadPool.Factory(
"flow_processing_pool", min_threads=2, max_threads=50))
self.flow_processing_request_handler_pool.Start()
|
Creates a datastore implementation.
Args:
host: Passed to MySQLdb.Connect when creating a new connection.
port: Passed to MySQLdb.Connect when creating a new connection.
user: Passed to MySQLdb.Connect when creating a new connection.
password: Passed to MySQLdb.Connect when creating a new connection.
database: Passed to MySQLdb.Connect when creating a new connection.
|
juraj-google-style
|
def copy(self, source_file_names, destination_file_names):
err_msg = 'source_file_names and destination_file_names should be equal in length'
assert len(source_file_names) == len(destination_file_names), err_msg
def _copy_path(source, destination):
try:
if os.path.exists(destination):
if os.path.isdir(destination):
shutil.rmtree(destination)
else:
os.remove(destination)
if os.path.isdir(source):
shutil.copytree(source, destination)
else:
shutil.copy2(source, destination)
except OSError as err:
raise IOError(err)
exceptions = {}
for source, destination in zip(source_file_names, destination_file_names):
try:
_copy_path(source, destination)
except Exception as e:
exceptions[source, destination] = e
if exceptions:
raise BeamIOError('Copy operation failed', exceptions)
|
Recursively copy the file tree from the source to the destination
Args:
source_file_names: list of source file objects that needs to be copied
destination_file_names: list of destination of the new object
Raises:
``BeamIOError``: if any of the copy operations fail
|
github-repos
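A hypothetical usage sketch for the `copy` method above, assuming it belongs to Beam's `LocalFileSystem`; the paths are placeholders.
from apache_beam.io.localfilesystem import LocalFileSystem
from apache_beam.options.pipeline_options import PipelineOptions
fs = LocalFileSystem(PipelineOptions([]))
fs.copy(['/tmp/src/report.txt'], ['/tmp/dst/report.txt'])  # both lists must be equal in length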
|
def rename_edges(self, old_node_name, new_node_name):
graph = self.graph
for node, edges in list(graph.items()):
if (node == old_node_name):
graph[new_node_name] = copy(edges)
del graph[old_node_name]
elif (old_node_name in edges):
edges.remove(old_node_name)
edges.add(new_node_name)
|
Change references to a node in existing edges.
Args:
old_node_name (str): The old name for the node.
new_node_name (str): The new name for the node.
|
codesearchnet
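A worked example of the renaming behaviour above, using a tiny adjacency mapping of node name to a set of neighbour names (the shape `self.graph` is assumed to have).
graph = {'a': {'b'}, 'b': {'c'}, 'c': set()}
# after calling rename_edges('b', 'b2') on an object wrapping this graph,
# the mapping is expected to become:
# {'a': {'b2'}, 'b2': {'c'}, 'c': set()}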
|
def _make_query_from_terms(self, terms, limit=None):
expanded_terms = self._expand_terms(terms)
terms_used = 0
if expanded_terms['doc'] and expanded_terms['keywords']:
query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) "
" + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
' as score']
elif expanded_terms['doc']:
query_parts = ["SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"]
else:
query_parts = ['SELECT vid, dataset_vid, 1 as score']
query_parts.append('FROM partition_index')
query_params = {}
where_count = 0
if expanded_terms['doc']:
query_parts.append('WHERE doc @@ to_tsquery(:doc)')
query_params['doc'] = self.backend._and_join(expanded_terms['doc'])
where_count += 1
terms_used += 1
if expanded_terms['keywords']:
query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
query_parts.append(("AND " if where_count else "WHERE ") + kw_q)
where_count += 1
terms_used += 1
if expanded_terms['from']:
query_parts.append(("AND " if where_count else "WHERE ") + ' from_year >= :from_year')
query_params['from_year'] = expanded_terms['from']
where_count += 1
terms_used += 1
if expanded_terms['to']:
query_parts.append(("AND " if where_count else "WHERE ") + ' to_year <= :to_year')
query_params['to_year'] = expanded_terms['to']
where_count += 1
terms_used += 1
query_parts.append('ORDER BY score DESC')
if limit:
query_parts.append('LIMIT :limit')
query_params['limit'] = limit
if not terms_used:
logger.debug('No terms used; not creating query')
return None, None
query_parts.append(';')
deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\
.format(terms, query_parts, query_params)
logger.debug(deb_msg)
return text('\n'.join(query_parts)), query_params
|
Creates a query for partition from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (TextClause, dict): First element is the FTS query, second is
the parameters of the query. Each row returned by executing the query
is a tuple of three elements: (vid, dataset_vid, score).
|
juraj-google-style
|
def depth_december_average_ground_temperature(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_december_average_ground_temperature`'.format(value))
self._depth_december_average_ground_temperature = value
|
Corresponds to IDD Field `depth_december_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_december_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def list_hierarchy(class_name, bases):
class_list = [Uri(class_name)]
for base in bases:
if base.__name__ not in IGNORE_CLASSES:
class_list.append(Uri(base.__name__))
return list(set(class_list))
|
Creates a list of the class hierarchy
Args:
-----
class_name: name of the current class
bases: list/tuple of bases for the current class
|
juraj-google-style
|
def set_spacing(self, space):
self.figure.spacing = space
if ('subplots_adjust_kwargs' not in self.figure.__dict__):
self.figure.subplots_adjust_kwargs = {}
if (space == 'wide'):
self.figure.subplots_adjust_kwargs['hspace'] = 0.3
self.figure.subplots_adjust_kwargs['wspace'] = 0.3
else:
self.figure.subplots_adjust_kwargs['hspace'] = 0.0
self.figure.subplots_adjust_kwargs['wspace'] = 0.0
return
|
Set the figure spacing.
Sets whether in general there is space between subplots.
If all axes are shared, this can be `tight`. Default in code is `wide`.
The main difference is the tick labels extend to the ends if space==`wide`.
If space==`tight`, the edge tick labels are cut off for clarity.
Args:
space (str): Sets spacing for subplots. Either `wide` or `tight`.
|
codesearchnet
|
def filter_string(self, word):
segs = [m.group(0) for m in self.seg_regex.finditer(word)]
return ''.join(segs)
|
Return a string like the input but containing only legal IPA segments
Args:
word (unicode): input string to be filtered
Returns:
unicode: string identical to `word` but with invalid IPA segments
absent
|
juraj-google-style
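A hypothetical usage sketch for `filter_string` above, assuming the panphon `FeatureTable` API; the input word and the expected output are illustrative only.
import panphon  # assumed provider of the FeatureTable class
ft = panphon.FeatureTable()
print(ft.filter_string(u'pʰat9a'))  # expected: 'pʰata' -- the digit is not a legal IPA segment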
|
def Add(self, rdf_value, mutation_pool=None):
self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)
|
Adds an rdf value to the queue.
Adds an rdf value to the queue. Does not require that the queue be locked.
Args:
rdf_value: The rdf value to add to the queue.
mutation_pool: A MutationPool object to write to.
Raises:
ValueError: rdf_value has unexpected type.
|
codesearchnet
|
def _send_file(self, method, path, data, filename):
with open(filename, 'rb') as f:
return self._make_request(method, path, data=data, files=[f, ])
|
Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
|
juraj-google-style
|
def __init__(self, channel):
self.ListDatabases = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString,
)
self.CreateDatabase = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetDatabase = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.FromString,
)
self.UpdateDatabaseDdl = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.DropDatabase = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetDatabaseDdl = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def _cursor_pb(cursor_pair):
if (cursor_pair is not None):
(data, before) = cursor_pair
value_pbs = [_helpers.encode_value(value) for value in data]
return query_pb2.Cursor(values=value_pbs, before=before)
|
Convert a cursor pair to a protobuf.
If ``cursor_pair`` is :data:`None`, just returns :data:`None`.
Args:
cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of
* a list of field values.
* a ``before`` flag
Returns:
Optional[google.cloud.firestore_v1beta1.types.Cursor]: A
protobuf cursor corresponding to the values.
|
codesearchnet
|
def _NodeDef(op_type, name, attrs=None) -> node_def_pb2.NodeDef:
node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type), name=compat.as_bytes(name))
if attrs:
for k, v in attrs.items():
node_def.attr[k].CopyFrom(v)
return node_def
|
Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
attrs: Dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
|
github-repos
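A minimal sketch of calling the `_NodeDef` helper above; the attribute value uses TensorFlow's standard `AttrValue` proto.
from tensorflow.core.framework import attr_value_pb2, types_pb2
node = _NodeDef('Placeholder', 'x',
                attrs={'dtype': attr_value_pb2.AttrValue(type=types_pb2.DT_FLOAT)})
print(node.op, node.name)       # Placeholder x
print(node.attr['dtype'].type)  # 1, i.e. DT_FLOAT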
|
def parse_json_file(self, json_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:
with open(Path(json_file), encoding='utf-8') as open_json_file:
data = json.loads(open_json_file.read())
outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
return tuple(outputs)
|
Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types.
Args:
json_file (`str` or `os.PathLike`):
File name of the json file to parse
allow_extra_keys (`bool`, *optional*, defaults to `False`):
Defaults to False. If False, will raise an exception if the json file contains keys that are not
parsed.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.
|
github-repos
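A usage sketch for `parse_json_file` via `HfArgumentParser`; the dataclass and the contents of `args.json` are made up for illustration.
from dataclasses import dataclass
from transformers import HfArgumentParser

@dataclass
class TrainConfig:
    learning_rate: float = 5e-5
    num_epochs: int = 3

parser = HfArgumentParser(TrainConfig)
# args.json might contain: {"learning_rate": 1e-4, "num_epochs": 10}
(config,) = parser.parse_json_file('args.json')
print(config.learning_rate, config.num_epochs)  # 0.0001 10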
|
def list_deployment_operations(access_token, subscription_id, rg_name, deployment_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rg_name,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'/operations',
'?api-version=', BASE_API])
return do_get(endpoint, access_token)
|
List all operations involved in a given deployment.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rg_name (str): Azure resource group name.
deployment_name (str): Name of the deployment.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def fixed_point(is_zero, plus, minus, f, x):
@memo_Y
def _fixed_point(fixed_point_fun):
def __fixed_point(collected, new):
diff = minus(new, collected)
if is_zero(diff):
return collected
return fixed_point_fun(plus(collected, diff), f(diff))
return __fixed_point
return _fixed_point(x, f(x))
|
Get the least fixed point when it can be computed piecewise.
.. testsetup::
from proso.func import fixed_point
.. doctest::
>>> sorted(fixed_point(
... is_zero=lambda xs: len(xs) == 0,
... plus=lambda xs, ys: xs + ys,
... minus=lambda xs, ys: [x for x in xs if x not in ys],
... f=lambda xs: [x + 1 for x in xs if x < 10],
... x=[0, 5, 8]
... ))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Args:
is_zero: function returning True if the given value is zero
plus: function taking two values and returning their addition
minus: function taking two values and returning their difference
f: function computing the expected value
x: initial value
Returns:
The least fixed point.
|
codesearchnet
|
async def get_matches(self, state: MatchState = MatchState.all_):
matches = await self.connection('GET',
'tournaments/{}/matches'.format(self._tournament_id),
state=state.value,
participant_id=self._id)
ms = []
for m in matches:
ms.append(await self._tournament.get_match(m['match']['id']))
return ms
|
Return the matches of the given state
|methcoro|
Args:
state: see :class:`MatchState`
Raises:
APIException
|
juraj-google-style
|
def _StopOps(from_ops: list[ops.Operation], stop_gradient_ops: list[ops.Operation], pending_count, xs_set):
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in _NonEagerInputs(op, xs_set):
if pending_count[inp.op] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op)
stop_ops.update((op for op in stop_gradient_ops))
return stop_ops
|
The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: mapping from operation to number of backprop inputs.
xs_set: ObjectIdentitySet of Tensors.
Returns:
The set of operations.
|
github-repos
|
def add_jpeg_decoding(module_spec):
(input_height, input_width) = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int)
return (jpeg_data, resized_image)
|
Adds operations that perform JPEG decoding and resizing to the graph.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
|
codesearchnet
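A hypothetical TF1-style usage sketch for `add_jpeg_decoding` above; the TF-Hub module handle and the image path are placeholders.
import tensorflow as tf
import tensorflow_hub as hub
module_spec = hub.load_module_spec('https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2')
with tf.Graph().as_default():
    jpeg_data, resized_image = add_jpeg_decoding(module_spec)
    with tf.Session() as sess:
        with open('example.jpg', 'rb') as f:
            image = sess.run(resized_image, feed_dict={jpeg_data: f.read()})
print(image.shape)  # expected: (1, 224, 224, 3)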
|
def transform_normalize_unicode(source, form, name=None):
with ops.name_scope(name, "TransformNormalizeUnicode", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(
indices=source.indices,
values=ops_module.transform_normalize_unicode(source.values, form),
dense_shape=source.dense_shape
)
else:
result = ops_module.transform_normalize_unicode(source, form)
return result
|
Normalize unicode strings tensor.
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to normalize.
form: Scalar value, name of normalization algorithm.
One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`.
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of same shape and size as input.
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0]
return len(token_ids_0 + sep + token_ids_1 + sep) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
|
github-repos
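A usage sketch for the helper above via the XLM-RoBERTa tokenizer from the transformers library; the checkpoint name is the usual public one.
from transformers import XLMRobertaTokenizer
tok = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
ids_a = tok.convert_tokens_to_ids(tok.tokenize('Hello world'))
ids_b = tok.convert_tokens_to_ids(tok.tokenize('How are you?'))
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # a list of zeros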
|
def _bulk_cache_lookup(self, api_name, keys):
if self._cache:
responses = self._cache.bulk_lookup(api_name, keys)
missing_keys = [key for key in keys if key not in responses.keys()]
return (responses, missing_keys)
return ({}, keys)
|
Performs a bulk cache lookup and returns a tuple with the results
found and the keys missing in the cache. If the cache is not configured
it will return an empty dictionary of found results and the initial
list of keys.
Args:
api_name: a string name of the API.
keys: an enumerable of string keys.
Returns:
A tuple: (responses found, missing keys).
|
juraj-google-style
|
def __init__(self, type, document, old_index, new_index):
self.type = type
self.document = document
self.old_index = old_index
self.new_index = new_index
|
DocumentChange
Args:
type (ChangeType):
document (document.DocumentSnapshot):
old_index (int):
new_index (int):
|
juraj-google-style
|
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
if (_offsetideal is None):
_offsetideal = bitoffset
if bitoffset == 0:
return self
newpromise = TDOPromise(self._chain, (self._bitstart + bitoffset), self._bitlength, _parent=self, bitstartselective=(self._bitstartselective + _offsetideal))
self._addsub(newpromise, 0)
return newpromise
|
Create a copy of this promise with an offset, and use it as this promise's child.
If this promise's primitive is being merged with another
primitive, a new subpromise may be required to keep track of
the new offset of data coming from the new primitive.
Args:
bitoffset: An integer offset of the data in the new primitive.
_offsetideal: integer offset of the data in terms of bits actually used for promises. Used to calculate the start index to read if the associated primitive has arbitrary TDO control.
Returns:
A TDOPromise registered with this promise, and with the
correct offset.
|
codesearchnet
|