code: string (lengths 20 to 4.93k)
docstring: string (lengths 33 to 1.27k)
source: string (3 classes)
def stack(x, axis=0): if any_symbolic_tensors((x,)): return Stack(axis=axis).symbolic_call(x) return backend.numpy.stack(x, axis=axis)
Join a sequence of tensors along a new axis. The `axis` parameter specifies the index of the new axis in the dimensions of the result. Args: x: A sequence of tensors. axis: Axis along which to stack. Defaults to `0`. Returns: The stacked tensor.
github-repos
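A minimal usage sketch for the row above, assuming this `stack` is exposed through a Keras-ops-style namespace and accepts eager NumPy inputs (the arrays and shapes are made up for illustration):
import numpy as np

a = np.ones((2, 3))
b = np.zeros((2, 3))
out = stack([a, b], axis=0)   # new axis inserted at position 0
print(out.shape)              # -> (2, 2, 3)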
def _AddPathSegments(self, path, ignore_list): path_segments = path.split(self._path_segment_separator) for (path_segment_index, path_segment) in enumerate(path_segments): if (path_segment_index not in self.path_segments_per_index): self.path_segments_per_index[path_segment_index] = {} if (path_segment_index not in ignore_list): path_segments = self.path_segments_per_index[path_segment_index] if (path_segment not in path_segments): path_segments[path_segment] = [] paths_per_segment_list = path_segments[path_segment] paths_per_segment_list.append(path)
Adds the path segments to the table. Args: path: a string containing the path. ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root.
codesearchnet
def labels_in_range(self, start, end, fully_included=False): if fully_included: intervals = self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end) return [iv.data for iv in intervals]
Return a list of labels that are within the given range. Labels that only partially overlap the range are also included. Args: start(float): Start-time in seconds. end(float): End-time in seconds. fully_included(bool): If ``True``, only labels fully included in the range are returned. Otherwise overlapping ones are returned as well. (default ``False``) Returns: list: List of labels in the range. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>> ]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
codesearchnet
def mel_spectrogram(self, sequence: np.ndarray): mel_specs = [] for seq in sequence: window = np.hanning(self.window_size + 1)[:-1] mel_specs.append(spectrogram(waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters)) mel_specs = np.array(mel_specs) return mel_specs
Generates MelSpectrogram. Args: sequence (`numpy.ndarray`): The sequence of which the mel-spectrogram will be computed.
github-repos
def _parse_services(self, service_config: dict, service_name: str, service_list: dict) -> dict: for key, value in service_list['services'][service_name].items(): service_config[key] = value if 'command' in key: key = "args" service_config['args'] = value service_config.pop('command') if 'ports' in key: endpoint_spec = self._parse_ports(value) service_config['endpoint_spec'] = endpoint_spec service_config.pop('ports') if 'volumes' in key: volume_spec = self._parse_volumes(value) service_config['mounts'] = volume_spec service_config.pop('volumes') if 'deploy' in key: self._parse_deploy(value, service_config) service_config.pop('deploy') if 'networks' in key: network_spec = self._parse_networks(service_list) service_config['networks'] = network_spec if 'logging' in key: self._parse_logging(value, service_config) service_config.pop('logging') if 'environment' in key: service_config['env'] = value service_config.pop('environment') return service_config
Parse the docker compose file. Args: service_config (dict): Service configuration from the compose file service_name (string): Name of the service service_list (dict): Service configuration list Returns: dict, service specifications extracted from the compose file
juraj-google-style
def _create_handler(self, config): if (config is None): raise ValueError('No handler config to create handler from.') if ('name' not in config): raise ValueError('Handler name is required.') handler_name = config['name'] module_name = handler_name.rsplit('.', 1)[0] class_name = handler_name.rsplit('.', 1)[(- 1)] module = import_module(module_name) handler_class = getattr(module, class_name) instance = handler_class(**config) return instance
Creates a handler from its config. Params: config: handler config Returns: handler instance
codesearchnet
def download_items(cache_fn, start=None): with SqliteDict(cache_fn) as db: last_id = db.get("last_id", 0) if not start else start _download_items(db, last_id) db.commit()
Open `cache_fn` as a database and download all not-yet downloaded items. Args: cache_fn (str): Path to the sqlite database. If it does not exist, it will be created. start (int, default None): If set, start from this sysno.
juraj-google-style
def _ParseEntryObjectOffsets(self, file_object, file_offset): entry_array_object = self._ParseEntryArrayObject(file_object, file_offset) entry_object_offsets = list(entry_array_object.entry_object_offsets) while entry_array_object.next_entry_array_offset != 0: entry_array_object = self._ParseEntryArrayObject( file_object, entry_array_object.next_entry_array_offset) entry_object_offsets.extend(entry_array_object.entry_object_offsets) return entry_object_offsets
Parses entry array objects for the offset of the entry objects. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the first entry array object relative to the start of the file-like object. Returns: list[int]: offsets of the entry objects.
juraj-google-style
def create_heart(self, git_repo_url, max_commits=10, weeks_from_now=1): self.weeks_from_now = weeks_from_now self.end_date = self.get_end_date() try: self.repository_name = git_repo_url.split('/')[-1][:-4] self.git_repo_url = git_repo_url self.max_commits = max_commits self.do_commits() self.do_commit_amends() except IndexError as ie: raise ErrorMessage( "Please provide the correct URL for the Repository") except Exception as e: raise ErrorMessage(str(e))
Creates a heart on the Summary. Args: git_repo_url: The URL (ssh or https) of the repository, used for cloning. max_commits: Maximum number of commits in a day. weeks_from_now: The number of weeks from this week at which the heart's right-center boundary will be placed.
juraj-google-style
def __init__(self, network, scope='network-baseline', summary_labels=()): self.network = Network.from_spec( spec=network, kwargs=dict(summary_labels=summary_labels) ) assert len(self.network.internals_spec()) == 0 self.linear = Linear(size=1, bias=0.0, scope='prediction', summary_labels=summary_labels) super(NetworkBaseline, self).__init__(scope=scope, summary_labels=summary_labels)
Network baseline. Args: network: Network specification dict
juraj-google-style
def iterator_full_type_from_spec(element_spec): args = fulltypes_for_flat_tensors(element_spec) return full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ITERATOR, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=args)])])
Returns a FullTypeDef for an iterator for the elements. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A FullTypeDef for an iterator for the element tensor representation.
github-repos
def get(self, center, target, date): if (center.index, target.index) in self.segments: pos, vel = self.segments[center.index, target.index].compute_and_differentiate(date.jd) sign = 1 else: pos, vel = self.segments[target.index, center.index].compute_and_differentiate(date.jd) sign = -1 if len(pos) == 3: pv = np.concatenate((pos, vel / S_PER_DAY)) elif len(pos) == 6: pv = np.array(pos) else: raise JplError("Unknown state vector format") return sign * pv * 1000
Retrieve the position and velocity of a target with respect to a center Args: center (Target): target (Target): date (Date): Return: numpy.array: length-6 array position and velocity (in m and m/s) of the target, with respect to the center
juraj-google-style
def __sub__(self, other: 'TensorFluent') -> 'TensorFluent': return self._binary_op(self, other, tf.subtract, tf.float32)
Returns a TensorFluent for the subtraction arithmetic operator. Args: self: The first operand. other: The second operand. Returns: A TensorFluent wrapping the operator's output.
juraj-google-style
def combine(path1, path2): if not path1: return path2.lstrip() return "{}/{}".format(path1.rstrip("/"), path2.lstrip("/"))
Join two paths together. This is faster than :func:`~fs.path.join`, but only works when the second path is relative, and there are no back references in either path. Arguments: path1 (str): A PyFilesytem path. path2 (str): A PyFilesytem path. Returns: str: The joint path. Example: >>> combine("foo/bar", "baz") 'foo/bar/baz'
juraj-google-style
def train(self, mode=True): super().train(mode) if mode: mu.apply_leaf(self, mu.set_train_mode) return self
r""" Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self
codesearchnet
def exp(x): if any_symbolic_tensors((x,)): return Exp().symbolic_call(x) return backend.numpy.exp(x)
Calculate the exponential of all elements in the input tensor. Args: x: Input tensor. Returns: Output tensor, element-wise exponential of `x`.
github-repos
def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain): password = key_chain.GetCredential(path_spec, 'password') if password: bde_volume.set_password(password) recovery_password = key_chain.GetCredential(path_spec, 'recovery_password') if recovery_password: bde_volume.set_recovery_password(recovery_password) startup_key = key_chain.GetCredential(path_spec, 'startup_key') if startup_key: bde_volume.read_startup_key(startup_key) bde_volume.open_file_object(file_object)
Opens the BDE volume using the path specification. Args: bde_volume (pybde.volume): BDE volume. path_spec (PathSpec): path specification. file_object (FileIO): file-like object. key_chain (KeyChain): key chain.
juraj-google-style
def get_image_patches(self, image: np.array, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> List[np.array]: if not isinstance(grid_pinpoints, list): raise TypeError('grid_pinpoints must be a list of possible resolutions.') possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=input_data_format) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format) patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format) patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches] resized_original_image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format) image_patches = [resized_original_image] + patches return image_patches
Process an image with variable resolutions by dividing it into patches. Args: image (np.array): The input image to be processed. grid_pinpoints (List): A string representation of a list of possible resolutions. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. resample (`PILImageResampling`): Resampling filter to use if resizing the image. data_format (`ChannelDimension` or `str`): The channel dimension format for the output image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: List[np.array]: A list of NumPy arrays containing the processed image patches.
github-repos
def create_creation_event(self): event = self.create_audit_event(code='AUDIT_CREATE') if self._meta.create_message: event.body = self._meta.create_message['message'] event.code = self._meta.create_message['code'] event.meta = self.parse_meta(self._meta.create_message['meta']) self.create_event_callback(event) event.save() return event
Parse the create message DSL to insert the data into the Event. Returns: fleaker.peewee.EventStorageMixin: A new Event instance with data put in it
codesearchnet
def ordered_repr(obj: object, attrlist: Iterable[str], joiner: str = COMMA_SPACE) -> str: return "<{classname}({kvp})>".format( classname=type(obj).__name__, kvp=joiner.join("{}={}".format(a, repr(getattr(obj, a))) for a in attrlist) )
Shortcut to make :func:`repr` functions ordered. Define your :func:`__repr__` like this: .. code-block:: python def __repr__(self): return ordered_repr(self, ["field1", "field2", "field3"]) Args: obj: object to display attrlist: iterable of attribute names joiner: string with which to join the elements Returns: string: :func:`repr`-style representation
juraj-google-style
def init(library: typing.Union[str, types.ModuleType]) -> None: if isinstance(library, types.ModuleType): library = library.__name__ if library not in manager._handlers: raise ValueError("Possible values are <{}>, not <{}>".format(manager._handlers.keys(), library)) manager.init(library, asynclib) asynclib.lib_name = library asynclib._init = True
Must be called at some point after import and before your event loop is run. Populates the asynclib instance of _AsyncLib with methods relevant to the async library you are using. The supported libraries at the moment are: - curio - trio Args: library (str or module): Either the module name as a string or the imported module itself. E.g. ``multio.init(curio)``.
juraj-google-style
def generate_query_key(self, serializer): rewritten = [] last = len(self.field) - 1 s = serializer field = None for i, field_name in enumerate(self.field): fields = s.fields if field_name not in fields: fields = getattr(s, 'get_all_fields', lambda: {})() if field_name == 'pk': rewritten.append('pk') continue if field_name not in fields: raise ValidationError( "Invalid filter field: %s" % field_name ) field = fields[field_name] model_field_name = field.source or field_name model_field = get_model_field(s.get_model(), model_field_name) if isinstance(model_field, RelatedObject): model_field_name = model_field.field.related_query_name() rewritten.append(model_field_name) if i == last: break s = getattr(field, 'serializer', None) if isinstance(s, serializers.ListSerializer): s = s.child if not s: raise ValidationError( "Invalid nested filter field: %s" % field_name ) if self.operator: rewritten.append(self.operator) return ('__'.join(rewritten), field)
Get the key that can be passed to Django's filter method. To account for serializer field name rewrites, this method translates serializer field names to model field names by inspecting `serializer`. For example, a query like `filter{users.events}` would be returned as `users__events`. Arguments: serializer: A DRF serializer Returns: A filter key.
juraj-google-style
def delete(self, wait_for_deletion=True): if self.exists(): try: self._api.objects_delete(self._bucket, self._key) except Exception as e: raise e if wait_for_deletion: for _ in range(_MAX_POLL_ATTEMPTS): objects = Objects(self._bucket, prefix=self.key, delimiter='/', context=self._context) if any(((o.key == self.key) for o in objects)): time.sleep(_POLLING_SLEEP) continue break else: logging.error('Failed to see object deletion after %d attempts.', _MAX_POLL_ATTEMPTS)
Deletes this object from its bucket. Args: wait_for_deletion: If True, we poll until this object no longer appears in objects.list operations for this bucket before returning. Raises: Exception if there was an error deleting the object.
codesearchnet
def _apply(self, ctx: ExtensionContext) -> AugmentedDict: node_key, node_value = ctx.node def process(pattern: Pattern[str], _str: str) -> Any: _match = pattern.match(_str) if _match is None: return _str placeholder, varname = _match.group(1), _match.group(2) varval = self.vars.get(varname, None) if varval is None and self.fail_on_unset: raise ExtensionError("Variable '{}' is unset.".format(varname)) return _str.replace(placeholder, varval or self.default) _pattern = re.compile(self.__pattern__) node_key = process(_pattern, node_key) node_value = process(_pattern, node_value) return {node_key: node_value}
Replaces any {{var::*}} directives with its actual variable value or a default. Args: ctx: The processing context. Returns: The altered node key and value.
juraj-google-style
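A rough standalone sketch of the {{var::<name>}} substitution the extension above performs; the regex, variable map, and default value below are assumptions for illustration, since the real extension reads them from its own configuration (self.__pattern__, self.vars, self.default):
import re

pattern = re.compile(r"(\{\{var::(\w+)\}\})")   # assumed placeholder syntax
variables = {"user": "alice"}

def substitute(value, default=""):
    # Same shape as the inner process() helper: match, then replace the placeholder.
    match = pattern.match(value)
    if match is None:
        return value
    placeholder, name = match.group(1), match.group(2)
    return value.replace(placeholder, variables.get(name, default))

print(substitute("{{var::user}}@example.com"))   # -> alice@example.com
print(substitute("plain value"))                 # unchanged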
def get_public_ip(access_token, subscription_id, resource_group, ip_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/', 'publicIPAddresses/', ip_name, '?api-version=', NETWORK_API]) return do_get(endpoint, access_token)
Get details about the named public IP address. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. ip_name (str): Name of the public IP address resource. Returns: HTTP response. Public IP address JSON body.
codesearchnet
def set_settings(self, settings): for k, v in settings.items(): setattr(self, k, v)
Set all given settings as object attributes. Args: settings (dict): Dictionary of settings.
juraj-google-style
def MaxBipartiteMatching(self, graph): self.g = nx.Graph(graph) self.left = set((n for n, d in self.g.nodes(data=True) if not d['bipartite'])) self.right = set(self.g) - self.left self.num_matched = 0 self.s = set() self.t = set() self.matches = {} self.slack = {} self.slackx = {} self.prev = {} self.labels = {} for x in self.left: self.labels[x] = max([val['weight'] for val in self.g[x].values()]) for y in self.right: self.labels[y] = 0 while self.num_matched != len(self.left): self._Augment() ret = {} for k in self.left: ret[k] = self.matches[k] return ret
Find a maximum matching for a bipartite graph. This is O(n^3) implementation of the Hungarian method for complete bipartite matching problems. Args: graph: A networkx graph object, assumed to be bipartite. Returns: A dictionary keyed on node names in left to node names in right.
github-repos
def __init__(self, variables, name='ShardedVariable'): super(ShardedVariableMixin, self).__init__() self._variables = variables self._name = name if not isinstance(variables, Sequence) or not variables or any((not isinstance(v, variables_lib.Variable) for v in variables)): raise TypeError(f'Argument `variables` should be a non-empty list of `variables.Variable`s. Received {variables}') var_dtypes = {v.dtype for v in variables} if len(var_dtypes) > 1: raise ValueError(f'All elements in argument `variables` must have the same dtype. Received dtypes: {[v.dtype for v in variables]}') first_var = variables[0] self._dtype = first_var.dtype higher_dim_shapes = {tuple(v.shape.as_list()[1:]) for v in variables} if len(higher_dim_shapes) > 1: raise ValueError(f'All elements in argument `variables` must have the same shapes except for the first axis. Received shapes: {[v.shape for v in variables]}') first_dim = sum((int(v.shape.as_list()[0]) for v in variables)) self._shape = tensor_shape.TensorShape([first_dim] + first_var.shape.as_list()[1:]) for v in variables: v._sharded_container = weakref.ref(self) self._var_offsets = [[0 for _ in range(len(first_var.shape))] for _ in range(len(variables))] for i in range(1, len(variables)): self._var_offsets[i][0] += self._var_offsets[i - 1][0] + variables[i - 1].shape.as_list()[0] save_slice_info = [v._get_save_slice_info() for v in variables] if any((slice_info is not None for slice_info in save_slice_info)): raise ValueError(f'`SaveSliceInfo` should not be set for all elements in argument `variables`. `ShardedVariable` will infer `SaveSliceInfo` according to the order of the elements `variables`. Received save slice info {save_slice_info}') self._saving_variable = resource_variable_ops.UninitializedVariable(shape=self._shape, dtype=self._dtype, name=self._name, trainable=self._variables[0].trainable, synchronization=variables_lib.VariableSynchronization.NONE, aggregation=variables_lib.VariableAggregation.NONE)
Treats `variables` as shards of a larger Variable. Example: ``` variables = [ tf.Variable(..., shape=(10, 100), dtype=tf.float32), tf.Variable(..., shape=(15, 100), dtype=tf.float32), tf.Variable(..., shape=(5, 100), dtype=tf.float32) ] sharded_variable = ShardedVariableMixin(variables) assert sharded_variable.shape.as_list() == [30, 100] ``` Args: variables: A list of `ResourceVariable`s that comprise this sharded variable. Variables should not be shared between different `ShardedVariableMixin` objects. name: String. Name of this container. Defaults to "ShardedVariable".
github-repos
def partial_tile(tensor, tile_assignment, use_sharding_op=False, unspecified_dims=None): return Sharding.partial_tile(tile_assignment).apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])
Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. It must have one more dimension than tensor, and the last dimension represents partially replicated tiles. use_sharding_op: If true, adds a sharding op to set the sharding. unspecified_dims: An optional list of dimensions unspecified.
github-repos
def fit_transform(self, col): if self.anonymize: col = self.anonymize_column(col) self._fit(col) return self.transform(col)
Prepare the transformer and return processed data. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
juraj-google-style
def call_with_mapped_args(self, mapped_args: MappedArgs[FrameType]) -> _HasReturnT:
Calls this function with the given mapped arguments. Args: mapped_args: The function arguments mapped to parameter names. Returns: An object with information about the result of the function call, with a get_return_value() method that retrieves the return value.
github-repos
def __init__(self, path: utils.KeyPath, target: 'Symbolic', field: Optional[pg_typing.Field], old_value: Any, new_value: Any): self.path = path self.target = target self.field = field self.old_value = old_value self.new_value = new_value
Constructor. Args: path: KeyPath of the field that is updated. target: Parent of updated field. field: Specification of the updated field. old_value: Old value of the field. new_value: New value of the field.
github-repos
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): with tf.name_scope('loss', [logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) with tf.name_scope('smoothing_cross_entropy', [logits, labels]): confidence = (1.0 - smoothing) low_confidence = ((1.0 - confidence) / tf.to_float((vocab_size - 1))) soft_targets = tf.one_hot(tf.cast(labels, tf.int32), depth=vocab_size, on_value=confidence, off_value=low_confidence) xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=soft_targets) normalizing_constant = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20))))) xentropy -= normalizing_constant weights = tf.to_float(tf.not_equal(labels, 0)) return ((xentropy * weights), weights)
Calculate cross entropy loss while ignoring padding. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch_size, length_labels] smoothing: Label smoothing constant, used to determine the on and off values vocab_size: int size of the vocabulary Returns: Returns a float32 tensor with shape [batch_size, max(length_logits, length_labels)]
codesearchnet
def select_by_value(self, value): self._selected_key = None self._selected_item = None for k in self.children: item = self.children[k] if item.get_text() == value: item.attributes['selected'] = 'selected' self._selected_key = k self._selected_item = item else: if 'selected' in item.attributes: del item.attributes['selected']
Selects a DropDownItem by means of the contained text. Args: value (str): Textual content of the DropDownItem that has to be selected.
juraj-google-style
def __init__(self, maximum_number_of_cached_values): if maximum_number_of_cached_values <= 0: raise ValueError( 'Invalid maximum number of cached objects value zero or less.') super(ObjectsCache, self).__init__() self._maximum_number_of_cached_values = maximum_number_of_cached_values self._values = {}
Initializes the resolver objects cache object. Args: maximum_number_of_cached_values (int): maximum number of cached values. Raises: ValueError: when the maximum number of cached objects is 0 or less.
juraj-google-style
def get_storage(self, id_or_uri): uri = self.URI + "/{}/storage".format(extract_id_from_uri(id_or_uri)) return self._client.get(uri)
Get storage details of an OS Volume. Args: id_or_uri: ID or URI of the OS Volume. Returns: dict: Storage details
juraj-google-style
def process_exception_message(exception): exception_message = str(exception) for replace_char in ['\t', '\n', '\\n']: exception_message = exception_message.replace(replace_char, ('' if (replace_char != '\t') else ' ')) return exception_message.replace('section', 'alias')
Process an exception message. Args: exception: The exception to process. Returns: A filtered string summarizing the exception.
codesearchnet
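A quick illustration of the filtering behaviour of the function above (the sample message is made up):
exc = ValueError("Invalid value in\tsection 'foo'")
print(process_exception_message(exc))   # -> "Invalid value in alias 'foo'"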
def SetEncodedValue(env, name, value, encoding=None): name = Encode(name, encoding=encoding) if value is None: env.pop(name, None) return env[name] = Encode(value, encoding=encoding)
Sets the value of name in env to an encoded value. Args: env: {str: str}, The env dict. name: str, The env var name. value: str or unicode, The value for name. If None then name is removed from env. encoding: str, The encoding to use or None to try to infer it.
github-repos
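A usage sketch for SetEncodedValue; it assumes the companion Encode helper referenced above normalizes names and values for the current platform encoding:
import os

env = dict(os.environ)                       # work on a copy
SetEncodedValue(env, 'CLOUDSDK_FLAG', 'on')  # sets the (encoded) value
SetEncodedValue(env, 'CLOUDSDK_FLAG', None)  # passing None removes the variable again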
def Analyze(self, hashes): hash_analyses = [] for digest in hashes: json_response = self._QueryHash(digest) hash_analysis = interface.HashAnalysis(digest, json_response) hash_analyses.append(hash_analysis) return hash_analyses
Looks up hashes in Viper using the Viper HTTP API. Args: hashes (list[str]): hashes to look up. Returns: list[HashAnalysis]: hash analysis. Raises: RuntimeError: If no host has been set for Viper.
codesearchnet
def remove_block(self, block, index="-1"): self[index]["__blocks__"].remove(block) self[index]["__names__"].remove(block.raw())
Remove block element from scope Args: block (Block): Block object
juraj-google-style
def _find_classes(self, dir): if (sys.version_info >= (3, 5)): classes = [d.name for d in os.scandir(dir) if d.is_dir()] else: classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return (classes, class_to_idx)
Finds the class folders in a dataset. Args: dir (string): Root directory path. Returns: tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary. Ensures: No class is a subdirectory of another.
codesearchnet
def GetPluginObjectByName(cls, plugin_name): plugin_class = cls._plugin_classes.get(plugin_name, None) if plugin_class: return plugin_class() return None
Retrieves a specific plugin object by its name. Args: plugin_name (str): name of the plugin. Returns: BasePlugin: a plugin object or None if not available.
codesearchnet
def _SmallestColSize(self, text): if (not text): return 0 stripped = terminal.StripAnsiText(text) return max((len(word) for word in stripped.split()))
Finds the largest indivisible word of a string. ...and thus the smallest possible column width that can contain that word unsplit over rows. Args: text: A string of text potentially consisting of words. Returns: Integer size of the largest single word in the text.
codesearchnet
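A worked example of the idea behind _SmallestColSize, with the ANSI-stripping step left out for brevity:
text = "interface GigabitEthernet0/0"
# The widest unbreakable word decides the minimum usable column width:
print(max(len(word) for word in text.split()))   # -> 18 ("GigabitEthernet0/0")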
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor): while True: tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id]) running_tasks = set() completed_tasks = set() canceled_tasks = set() fully_failed_tasks = set() task_fail_count = dict() message_task = None task_dict = dict() for t in tasks: task_id = job_model.numeric_task_id(t.get_field('task-id')) task_dict[task_id] = t status = t.get_field('task-status') if (status == 'FAILURE'): task_fail_count[task_id] = (task_fail_count.get(task_id, 0) + 1) if (task_fail_count[task_id] > retries): fully_failed_tasks.add(task_id) message_task = t elif (status == 'CANCELED'): canceled_tasks.add(task_id) if (not message_task): message_task = t elif (status == 'SUCCESS'): completed_tasks.add(task_id) elif (status == 'RUNNING'): running_tasks.add(task_id) retry_tasks = set(task_fail_count).difference(fully_failed_tasks).difference(running_tasks).difference(completed_tasks).difference(canceled_tasks) if ((not retry_tasks) and (not running_tasks)): if message_task: return [provider.get_tasks_completion_messages([message_task])] return [] for task_id in retry_tasks: identifier = ('{}.{}'.format(job_id, task_id) if task_id else job_id) print(' {} (attempt {}) failed. Retrying.'.format(identifier, task_fail_count[task_id])) msg = task_dict[task_id].get_field('status-message') print(' Failure message: {}'.format(msg)) _retry_task(provider, job_descriptor, task_id, (task_fail_count[task_id] + 1)) SLEEP_FUNCTION(poll_interval)
Wait for job and retry any tasks that fail. Stops retrying an individual task when: it succeeds, is canceled, or has been retried "retries" times. This function exits when there are no tasks running and there are no tasks eligible to be retried. Args: provider: job service provider job_id: a single job ID (string) to wait for poll_interval: integer seconds to wait between iterations retries: number of retries job_descriptor: job descriptor used to originally submit job Returns: Empty list if there was no error, a list containing an error message from a failed task otherwise.
codesearchnet
def display_required_items(msg_type): print(('Configure a profile for: ' + msg_type)) print('You will need the following information:') for (k, v) in CONFIG[msg_type]['settings'].items(): print((' * ' + v)) print('Authorization/credentials required:') for (k, v) in CONFIG[msg_type]['auth'].items(): print((' * ' + v))
Display the required items needed to configure a profile for the given message type. Args: :msg_type: (str) message type to create config entry.
codesearchnet
def path_get(p: tcod.path.AStar, idx: int) -> Tuple[int, int]: x = ffi.new("int *") y = ffi.new("int *") lib.TCOD_path_get(p._path_c, idx, x, y) return x[0], y[0]
Get a point on a path. Args: p (AStar): An AStar instance. idx (int): Should be in range: 0 <= idx < :any:`path_size` Returns: Tuple[int, int]: The (x, y) point at the given index.
juraj-google-style
def _get_elements(self, site): try: if isinstance(site.specie, Element): return [site.specie] return [Element(site.specie)] except: return site.species.elements
Get the list of elements for a Site Args: site (Site): Site to assess Returns: [Element]: List of elements
juraj-google-style
def proportions_from_distribution(table, label, sample_size, column_name='Random Sample'): proportions = sample_proportions(sample_size, table.column(label)) return table.with_column('Random Sample', proportions)
Adds a column named ``column_name`` containing the proportions of a random draw using the distribution in ``label``. This method uses ``np.random.multinomial`` to draw ``sample_size`` samples from the distribution in ``table.column(label)``, then divides by ``sample_size`` to create the resulting column of proportions. Args: ``table``: An instance of ``Table``. ``label``: Label of column in ``table``. This column must contain a distribution (the values must sum to 1). ``sample_size``: The size of the sample to draw from the distribution. ``column_name``: The name of the new column that contains the sampled proportions. Defaults to ``'Random Sample'``. Returns: A copy of ``table`` with a column ``column_name`` containing the sampled proportions. The proportions will sum to 1. Throws: ``ValueError``: If the ``label`` is not in the table, or if ``table.column(label)`` does not sum to 1.
codesearchnet
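A hypothetical usage sketch, assuming the `datascience` Table API this helper targets (the column names and sample size are made up):
from datascience import Table

flavors = Table().with_columns('flavor', ['vanilla', 'chocolate', 'mint'],
                               'chance', [0.5, 0.3, 0.2])
sampled = proportions_from_distribution(flavors, 'chance', 1000)
# `sampled` has an extra 'Random Sample' column of proportions that sum to 1.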
def FoldValue(self, value): if ((value is False) and (self._data_type_definition.false_value is not None)): return self._data_type_definition.false_value if ((value is True) and (self._data_type_definition.true_value is not None)): return self._data_type_definition.true_value raise ValueError('No matching True and False values')
Folds the data type into a value. Args: value (object): value. Returns: object: folded value. Raises: ValueError: if the data type definition cannot be folded into the value.
codesearchnet
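A minimal standalone sketch of the folding logic in FoldValue, using a stand-in data type definition (the attribute names mirror the method above; the concrete false/true values are assumptions):
class _BoolDefinition(object):
    false_value = 0
    true_value = 1

definition = _BoolDefinition()

def fold_bool(value):
    # Same branching as FoldValue, against the stand-in definition.
    if value is False and definition.false_value is not None:
        return definition.false_value
    if value is True and definition.true_value is not None:
        return definition.true_value
    raise ValueError('No matching True and False values')

print(fold_bool(True), fold_bool(False))   # -> 1 0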
def fetch(self, url): opener = self._urllib.build_opener() opener.addheaders = self._requestHeaders.items() response = opener.open(url) headers = response.info() raw = response.read() raw = raw.decode('utf8') if (not ('Content-Type' in headers)): raise OEmbedError('Missing mime-type in response') if ((headers['Content-Type'].find('application/xml') != (- 1)) or (headers['Content-Type'].find('text/xml') != (- 1))): response = OEmbedResponse.newFromXML(raw) elif ((headers['Content-Type'].find('application/json') != (- 1)) or (headers['Content-Type'].find('text/javascript') != (- 1)) or (headers['Content-Type'].find('text/json') != (- 1))): response = OEmbedResponse.newFromJSON(raw) else: raise OEmbedError(('Invalid mime-type in response - %s' % headers['Content-Type'])) return response
Fetch url and create a response object according to the mime-type. Args: url: The url to fetch data from Returns: OEmbedResponse object according to data fetched
codesearchnet
def aggr(array, op, initial_value, ty): weld_obj = WeldObject(encoder_, decoder_) array_var = weld_obj.update(array) if isinstance(array, WeldObject): array_var = array.obj_id weld_obj.dependencies[array_var] = array weld_template = "result(for(%(array)s, merger[%(ty)s,%(op)s], |b, i, e| merge(b, e)))" weld_obj.weld_code = weld_template % { "array": array_var, "ty": ty, "op": op} return weld_obj
Computes the aggregate of elements in the array. Args: array (WeldObject / Numpy.ndarray): Input array to aggregate op (str): Op string used to aggregate the array (+ / *) initial_value (int): Initial value for aggregation ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
juraj-google-style
def _list_samples(self, predicate=None): cursor = self.database[self.sample_collection].find(predicate, {'_id':0, 'md5':1}) return [item['md5'] for item in cursor]
List all samples that meet the predicate or all if predicate is not specified. Args: predicate: Match samples against this predicate (or all if not specified) Returns: List of the md5s for the matching samples
juraj-google-style
def _process_sum_prod(self, func, **kwargs): axis = kwargs.get("axis", 0) min_count = kwargs.get("min_count", 0) def sum_prod_builder(df, **kwargs): return func(df, **kwargs) if min_count <= 1: return self._full_reduce(axis, sum_prod_builder) else: return self._full_axis_reduce(axis, sum_prod_builder)
Calculates the sum or product of the DataFrame. Args: func: Pandas func to apply to DataFrame. ignore_axis: Whether to ignore axis when raising TypeError Return: A new QueryCompiler object with sum or prod of the object.
juraj-google-style
def simple_layer_stack(include_encdec_attention, num_layers=6, d_ff=2048, num_heads=8, d_kv=128, dropout_rate=0.1): ret = [] for _ in xrange(num_layers): ret.append( transformer_layers.SelfAttention( num_heads=num_heads, key_value_size=d_kv, attention_kwargs={"dropout_rate": dropout_rate})) if include_encdec_attention: ret.append( transformer_layers.EncDecAttention( num_heads=num_heads, key_value_size=d_kv, attention_kwargs={"dropout_rate": dropout_rate})) ret.append( transformer_layers.DenseReluDense( hidden_size=d_ff, dropout_rate=dropout_rate)) return transformer.LayerStack(ret)
Create a layer stack. Args: include_encdec_attention: a boolean num_layers: an integer d_ff: an integer num_heads: an integer d_kv: an integer dropout_rate: a float Returns: a LayerStack
juraj-google-style
def invert(self) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=invert_quat(self._quats), normalize_quats=False) else: raise ValueError('Both rotations are None')
Returns the inverse of the current Rotation. Returns: The inverse of the current Rotation
github-repos
def get_vcf_entry(variant_obj, case_id=None): if (variant_obj['category'] == 'snv'): var_type = 'TYPE' else: var_type = 'SVTYPE' info_field = ';'.join([('END=' + str(variant_obj['end'])), ((var_type + '=') + variant_obj['sub_category'].upper())]) variant_string = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}'.format(variant_obj['chromosome'], variant_obj['position'], variant_obj['dbsnp_id'], variant_obj['reference'], variant_obj['alternative'], variant_obj['quality'], ';'.join(variant_obj['filters']), info_field) if case_id: variant_string += '\tGT' for sample in variant_obj['samples']: variant_string += ('\t' + sample['genotype_call']) return variant_string
Get vcf entry from variant object Args: variant_obj(dict) Returns: variant_string(str): string representing variant in vcf format
codesearchnet
def int(name, default=None, allow_none=False, fallback=None): value = read(name, default, allow_none, fallback=fallback) if isinstance(value, builtins.str): value = value.strip() if ((value is None) and allow_none): return None else: return builtins.int(value)
Get an integer environment value or the default. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional)
codesearchnet
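A usage sketch for the reader above, assuming the function is exposed through an `env`-style module namespace (the variable names are made up):
import os

os.environ['WORKER_COUNT'] = ' 4 '           # surrounding whitespace is tolerated
print(env.int('WORKER_COUNT', default=1))    # -> 4
print(env.int('MISSING_VAR', default=8))     # -> 8 (falls back to the default)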
def to_representation(self, obj): representation = {} for (name, field) in self.fields.items(): if field.write_only: continue attribute = self.get_attribute(obj, (field.source or name)) if (attribute is None): representation[name] = ([] if field.many else None) elif field.many: representation[name] = [field.to_representation(item) for item in attribute] else: representation[name] = field.to_representation(attribute) return representation
Convert given internal object instance into representation dict. Representation dict may be later serialized to the content-type of choice in the resource HTTP method handler. This loops over all fields and retrieves source keys/attributes as field values with respect to optional field sources and converts each one using ``field.to_representation()`` method. Args: obj (object): internal object that needs to be represented Returns: dict: representation dictionary
codesearchnet
def find_field_names(fields, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, pad_with_none=False): fields = util.listify(fields) model = get_model(model, app) available_field_names = model._meta.get_all_field_names() matched_fields = [] for field_name in fields: match = fuzzy.extractOne(str(field_name), available_field_names) if (match and (match[1] is not None) and (match[1] >= score_cutoff)): matched_fields += [match[0]] elif pad_with_none: matched_fields += [None] return matched_fields
Use fuzzy string matching to find similar model field names without consulting a synonyms list Returns: list: A list model field names (strings) sorted from most likely to least likely. [] If no similar field names could be found in the indicated model [None] If none found and and `pad_with_none` set Examples: >>> find_field_names(['date_time', 'title_prefix', 'sales'], model='WikiItem') ['date', 'model', 'net_sales']
codesearchnet
def validate(item, namespace='accounts', version=2, context=None): if namespace == 'accounts': if version == 2: schema = v2.AccountSchema(strict=True, context=context) return schema.load(item).data elif version == 1: return v1.AccountSchema(strict=True).load(item).data raise InvalidSWAGDataException('Schema version is not supported. Version: {}'.format(version)) raise InvalidSWAGDataException('Namespace not supported. Namespace: {}'.format(namespace))
Validate item against version schema. Args: item: data object namespace: backend namespace version: schema version context: schema context object
juraj-google-style
def get_job(self): return Job(self.rest_client.make_request(self.job), self.rest_client)
Get the Streams job that owns this view. Returns: Job: Streams Job owning this view.
codesearchnet
class JavaJarExpansionService(object): def __init__(self, path_to_jar, extra_args=None, classpath=None, append_args=None): if extra_args and append_args: raise ValueError('Only one of extra_args or append_args may be provided') self.path_to_jar = path_to_jar self._extra_args = extra_args self._classpath = classpath or [] self._service_count = 0 self._append_args = append_args or [] def is_existing_service(self): return subprocess_server.is_service_endpoint(self.path_to_jar) @staticmethod def _expand_jars(jar): if glob.glob(jar): return glob.glob(jar) elif isinstance(jar, str) and (jar.startswith('http: return [subprocess_server.JavaJarServer.local_jar(jar)] else: try: group_id, artifact_id, version = jar.split(':') except ValueError: logging.warning('Unable to parse %s into group:artifact:version.', jar) return [jar] path = subprocess_server.JavaJarServer.local_jar(subprocess_server.JavaJarServer.path_to_maven_jar(artifact_id, group_id, version)) return [path] def _default_args(self): to_stage = ','.join([self.path_to_jar] + sum((JavaJarExpansionService._expand_jars(jar) for jar in self._classpath or []), [])) args = ['{{PORT}}', f'--filesToStage={to_stage}'] if subprocess_server.SubprocessServer._cache._live_owners: args.append('--alsoStartLoopbackWorker') return args def __enter__(self): if self._service_count == 0: self.path_to_jar = subprocess_server.JavaJarServer.local_jar(self.path_to_jar) if self._extra_args is None: self._extra_args = self._default_args() + self._append_args logging.info('Starting a JAR-based expansion service from JAR %s ' + ('and with classpath: %s' % self._classpath if self._classpath else ''), self.path_to_jar) classpath_urls = [subprocess_server.JavaJarServer.local_jar(path) for jar in self._classpath for path in JavaJarExpansionService._expand_jars(jar)] self._service_provider = subprocess_server.JavaJarServer(ExpansionAndArtifactRetrievalStub, self.path_to_jar, self._extra_args, classpath=classpath_urls) self._service = self._service_provider.__enter__() self._service_count += 1 return self._service def __exit__(self, *args): self._service_count -= 1 if self._service_count == 0: self._service_provider.__exit__(*args)
An expansion service based on an Java Jar file. This can be passed into an ExternalTransform as the expansion_service argument which will spawn a subprocess using this jar to expand the transform. Args: path_to_jar: the path to a locally available executable jar file to be used to start up the expansion service. extra_args: arguments to be provided when starting up the expansion service using the jar file. These arguments will replace the default arguments. classpath: Additional dependencies to be added to the classpath. append_args: arguments to be provided when starting up the expansion service using the jar file. These arguments will be appended to the default arguments.
github-repos
def assert_raises(expected_exception, extras=None, *args, **kwargs): context = _AssertRaisesContext(expected_exception, extras=extras) return context
Assert that an exception is raised when a function is called. If no exception is raised, the test fails. If an exception is raised but not of the expected type, the exception is let through. This should only be used as a context manager: with assert_raises(Exception): func() Args: expected_exception: An exception class that is expected to be raised. extras: An optional field for extra information to be included in test result.
github-repos
def __init__(self, todo_tasklet, limit): self._todo_tasklet = todo_tasklet self._limit = limit self._queues = {} self._running = [] self._cache = {}
Init. Args: todo_tasklet: the tasklet that actually fires RPC and waits on a MultiRPC. It should take a list of (future, arg) pairs and an "options" as arguments. "options" are rpc options. limit: max number of items to batch for each distinct value of "options".
juraj-google-style
def render(self, program: moderngl.Program, mode=None, vertices=(- 1), first=0, instances=1): vao = self.instance(program) if (mode is None): mode = self.mode vao.render(mode, vertices=vertices, first=first, instances=instances)
Render the VAO. Args: program: The ``moderngl.Program`` Keyword Args: mode: Override the draw mode (``TRIANGLES`` etc) vertices (int): The number of vertices to transform first (int): The index of the first vertex to start with instances (int): The number of instances
codesearchnet
def MultiDelete(self, urns, token=None): urns = [rdfvalue.RDFURN(urn) for urn in urns] if token is None: token = data_store.default_token for urn in urns: if urn.Path() == "/": raise ValueError("Can't delete root URN. Please enter a valid URN") deletion_pool = DeletionPool(token=token) deletion_pool.MultiMarkForDeletion(urns) marked_root_urns = deletion_pool.root_urns_for_deletion marked_urns = deletion_pool.urns_for_deletion logging.debug(u"Found %d objects to remove when removing %s", len(marked_urns), urns) logging.debug(u"Removing %d root objects when removing %s: %s", len(marked_root_urns), urns, marked_root_urns) pool = data_store.DB.GetMutationPool() for root in marked_root_urns: self._DeleteChildFromIndex(root, mutation_pool=pool) for urn_to_delete in marked_urns: try: self.intermediate_cache.ExpireObject(urn_to_delete.Path()) except KeyError: pass pool.DeleteSubjects(marked_urns) pool.Flush() self.Flush() logging.debug("Removed %d objects", len(marked_urns))
Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed.
juraj-google-style
def get(cls, issue_id): res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id) return cls(res) if res else None
Returns the class object identified by `issue_id` Args: issue_id (str): Unique EC2 Instance ID to load from database Returns: EC2 Instance object if found, else None
juraj-google-style
def payments(self, virtual_account_id, data={}, **kwargs): url = "{}/{}/payments".format(self.base_url, virtual_account_id) return self.get_url(url, data, **kwargs)
Fetch Payment for Virtual Account Id Args: virtual_account_id : Id for which Virtual Account objects has to be retrieved Returns: Payment dict for given Virtual Account Id
juraj-google-style
def convert_file_size_to_int(size: Union[int, str]): if isinstance(size, int): return size if size.upper().endswith('GIB'): return int(size[:-3]) * 2 ** 30 if size.upper().endswith('MIB'): return int(size[:-3]) * 2 ** 20 if size.upper().endswith('KIB'): return int(size[:-3]) * 2 ** 10 if size.upper().endswith('GB'): int_size = int(size[:-2]) * 10 ** 9 return int_size if size.upper().endswith('MB'): int_size = int(size[:-2]) * 10 ** 6 return int_size if size.upper().endswith('KB'): int_size = int(size[:-2]) * 10 ** 3 return int_size raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ```
github-repos
def _ProcessUnknownMessages(message, encoded_message): if (not encoded_message): return message decoded_message = json.loads(six.ensure_str(encoded_message)) message_fields = ([x.name for x in message.all_fields()] + list(message.all_unrecognized_fields())) missing_fields = [x for x in decoded_message.keys() if (x not in message_fields)] for field_name in missing_fields: message.set_unrecognized_field(field_name, decoded_message[field_name], messages.Variant.STRING) return message
Store any remaining unknown fields as strings. ProtoRPC currently ignores unknown values for which no type can be determined (and logs a "No variant found" message). For the purposes of reserializing, this is quite harmful (since it throws away information). Here we simply add those as unknown fields of type string (so that they can easily be reserialized). Args: message: Proto message we've decoded thus far. encoded_message: JSON string we're decoding. Returns: message, with any remaining unrecognized fields saved.
codesearchnet
def GetColocationGroups(self): return tf_item.TF_GetColocationGroups(self.tf_item)
Return a list of hard colocation constraints. All the nodes in a colocation tuple must be placed on the same device for the model to work. Returns: A list of colocation tuples.
github-repos
def market_normal(self, session, after_open, before_close) -> Session: logger = logs.get_logger(self.market_normal) if session not in self.exch: return SessNA ss = self.exch[session] s_time = shift_time(ss[0], int(after_open) + 1) e_time = shift_time(ss[-1], -int(before_close)) request_cross = pd.Timestamp(s_time) >= pd.Timestamp(e_time) session_cross = pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1]) if request_cross and (not session_cross): logger.warning(f'end time {e_time} is earlier than {s_time} ...') return SessNA return Session(s_time, e_time)
Normal trading interval within a market session, trimmed by the given offsets after the open and before the close. Args: session: [allday, day, am, pm, night] after_open: mins after open before_close: mins before close Returns: Session of start_time and end_time
juraj-google-style
def Map(fn, *args, **kwargs): if not callable(fn): raise TypeError('Map can be used only with callable objects. Received %r instead.' % fn) from apache_beam.transforms.util import fn_takes_side_inputs if fn_takes_side_inputs(fn): wrapper = lambda x, *args, **kwargs: [fn(x, *args, **kwargs)] else: wrapper = lambda x: [fn(x)] label = 'Map(%s)' % ptransform.label_from_callable(fn) if hasattr(fn, '__name__'): wrapper.__name__ = fn.__name__ type_hints = get_type_hints(fn).with_defaults(typehints.decorators.IOTypeHints.from_callable(fn)) if type_hints.input_types is not None: wrapper = with_input_types(*type_hints.input_types[0], **type_hints.input_types[1])(wrapper) output_hint = type_hints.simple_output_type(label) if output_hint: wrapper = with_output_types(typehints.Iterable[_strip_output_annotations(output_hint)])(wrapper) wrapper._argspec_fn = fn pardo = FlatMap(wrapper, *args, **kwargs) pardo.label = label return pardo
:func:`Map` is like :func:`FlatMap` except its callable returns only a single element. Args: fn (callable): a callable object. *args: positional arguments passed to the transform callable. **kwargs: keyword arguments passed to the transform callable. Returns: ~apache_beam.pvalue.PCollection: A :class:`~apache_beam.pvalue.PCollection` containing the :func:`Map` outputs. Raises: TypeError: If the **fn** passed as argument is not a callable. Typical error is to pass a :class:`DoFn` instance which is supported only for :class:`ParDo`.
github-repos
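A small pipeline sketch showing Map in context (the element values are arbitrary):
import apache_beam as beam

with beam.Pipeline() as p:
    (p
     | beam.Create([1, 2, 3])
     | 'Double' >> beam.Map(lambda x: x * 2)
     | beam.Map(print))   # emits 2, 4, 6 (element order is not guaranteed)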
def for_new_graph(*args, **kwargs): graph = tf.Graph() with graph.as_default(): return for_default_graph(*args, **kwargs)
Creates a Bookkeeper for a new graph. You must use `m.g.as_default()` to put the graph in scope: m = Bookkeeper.for_new_graph() with m.g.as_default(): ... Args: *args: Arguments to pass into Bookkeeper's constructor. **kwargs: Arguments to pass into Bookkeeper's constructor. Returns: A new Bookkeeper.
codesearchnet
def MROMerge(input_seqs): seqs = [Dedup(s) for s in input_seqs] try: return MergeSequences(seqs) except ValueError as e: raise MROError(input_seqs) from e
Merge a sequence of MROs into a single resulting MRO. Args: input_seqs: A sequence of MRO sequences. Returns: A single resulting MRO. Raises: MROError: If we discovered an illegal inheritance.
github-repos
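A worked example for MROMerge with strings standing in for classes, assuming MergeSequences performs a C3-style linearization merge:
# For class D(B, C) with B(A) and C(A), the inputs are the base MROs plus the base order:
print(MROMerge([['B', 'A'], ['C', 'A'], ['B', 'C']]))
# -> ['B', 'C', 'A']   (prepending 'D' gives D's full MRO)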
def to_yaml(self, **kwargs): raise RuntimeError('Method `model.to_yaml()` has been removed due to security risk of arbitrary code execution. Please use `model.to_json()` instead.')
Returns a yaml string containing the network configuration. Note: Since TF 2.6, this method is no longer supported and will raise a RuntimeError. To load a network from a yaml save file, use `keras.models.model_from_yaml(yaml_string, custom_objects={})`. `custom_objects` should be a dictionary mapping the names of custom losses / layers / etc to the corresponding functions / classes. Args: **kwargs: Additional keyword arguments to be passed to `yaml.dump()`. Returns: A YAML string. Raises: RuntimeError: announces that the method poses a security risk
github-repos
def unravel_sections(section_data): sections = [] for (type, subsection_list) in section_data.items(): for section in subsection_list: section['sectionType'] = type sections.append(section) return sections
Unravels section type dictionary into flat list of sections with section type set as an attribute. Args: section_data(dict): Data return from py:method::get_sections Returns: list: Flat list of sections with ``sectionType`` set to type (i.e. recitation, lecture, etc)
codesearchnet
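A small example of the flattening performed by unravel_sections (the section dicts are made up):
section_data = {
    'lecture': [{'id': 1}, {'id': 2}],
    'recitation': [{'id': 3}],
}
print(unravel_sections(section_data))
# -> [{'id': 1, 'sectionType': 'lecture'}, {'id': 2, 'sectionType': 'lecture'},
#     {'id': 3, 'sectionType': 'recitation'}]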
def create_all(cls, list_of_kwargs): try: return cls.add_all([ cls.new(**kwargs) if kwargs is not None else None for kwargs in list_of_kwargs]) except: cls.session.rollback() raise
Batch method for creating a list of instances Args: list_of_kwargs(list of dicts): A list of dicts where each dict denotes the keyword args that you would pass to the create method separately Examples: >>> Customer.create_all([ ... {'name': 'Vicky', 'age': 34, 'user_id': 1}, ... {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}])
juraj-google-style
def create_index(self, model, waiting_models): bucket_name = model._get_bucket_name() bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE) index_name = ('%s_%s' % (settings.DEFAULT_BUCKET_TYPE, bucket_name)) bucket = bucket_type.bucket(bucket_name) try: client.get_search_index(index_name) if (not (bucket.get_property('search_index') == index_name)): bucket.set_property('search_index', index_name) print(('+ %s (%s) search index is created.' % (model.__name__, index_name))) except RiakError: try: client.create_search_index(index_name, index_name, self.n_val) bucket.set_property('search_index', index_name) print(('+ %s (%s) search index is created.' % (model.__name__, index_name))) except RiakError: print(('+ %s (%s) search index checking operation is taken to queue.' % (model.__name__, index_name))) waiting_models.append(model)
Creates search indexes. Args: model: model to execute waiting_models: if riak can't return response immediately, model is taken to queue. After first execution session, method is executed with waiting models and controlled. And be ensured that all given models are executed properly. Returns:
codesearchnet
def create_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name, description, protocol='Tcp', source_range='*', destination_range='*', source_prefix='*', destination_prefix='*', access='Allow', priority=100, direction='Inbound'): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name, '/securityRules/', nsg_rule_name, '?api-version=', NETWORK_API]) properties = {'description': description} properties['protocol'] = protocol properties['sourcePortRange'] = source_range properties['destinationPortRange'] = destination_range properties['sourceAddressPrefix'] = source_prefix properties['destinationAddressPrefix'] = destination_prefix properties['access'] = access properties['priority'] = priority properties['direction'] = direction ip_body = {'properties': properties} body = json.dumps(ip_body) return do_put(endpoint, body, access_token)
Create network security group rule. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. nsg_name (str): Name of the Network Security Group. nsg_rule_name (str): Name of the new rule. description (str): Description. protocol (str): Optional protocol. Default Tcp. source_range (str): Optional source IP range. Default '*'. destination_range (str): Destination IP range. Default '*'. source_prefix (str): Source DNS prefix. Default '*'. destination_prefix (str): Destination prefix. Default '*'. access (str): Allow or deny rule. Default Allow. priority: Relative priority. Default 100. direction: Inbound or Outbound. Default Inbound. Returns: HTTP response. NSG JSON rule body.
codesearchnet
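A call sketch for `create_nsg_rule` above; the token, subscription id, resource group and NSG name are placeholders, and the `do_put` helper is assumed to return a requests-style HTTP response:

access_token = '<bearer token obtained from Azure auth>'
response = create_nsg_rule(
    access_token, '11111111-2222-3333-4444-555555555555', 'my-rg', 'my-nsg',
    'allow-ssh', 'Allow SSH management traffic',
    protocol='Tcp', destination_range='22', priority=110)
print(response.status_code)  # 200/201 on success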
def register(self, token, regexp):
    self._tokens.append((token, re.compile(regexp)))

Register a token.

Args:
    token (Token): the token class to register
    regexp (str): the regexp for that token
codesearchnet
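A small sketch of `register` above in use; `TokenRegistry`, `NAME` and `NUMBER` are hypothetical stand-ins for whatever lexer class and Token classes own the `_tokens` list:

lexer = TokenRegistry()  # hypothetical owner of the _tokens list
lexer.register(NAME, r'[A-Za-z_][A-Za-z0-9_]*')  # identifiers
lexer.register(NUMBER, r'\d+')                   # integer literals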
def get_pattern_additional_cycles(self, patternnumber):
    _checkPatternNumber(patternnumber)
    address = _calculateRegisterAddress('cycles', patternnumber)
    return self.read_register(address)

Get the number of additional cycles for a given pattern.

Args:
    patternnumber (integer): 0-7

Returns:
    The number of additional cycles (int).
juraj-google-style
def routerify(obj):
    router = Router()
    for info in get_routing_attributes(obj):
        router.add_route(*info)
    obj.__growler_router = router
    return router

Scan through the attributes of the object parameter looking for any which
match a route signature. A router is created from these attributes and
stored on the object.

Args:
    obj (object): The object (with attributes) from which to set up a router.

Returns:
    Router: The router created from attributes in the object.
juraj-google-style
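A sketch of how `routerify` above might be wired up; `MyHandlers` is a hypothetical object whose methods carry the route metadata that `get_routing_attributes` scans for:

handlers = MyHandlers()       # hypothetical object with route-shaped methods
router = routerify(handlers)  # builds a Router and stores it on the object
# the same Router is also cached on handlers via the __growler_router attribute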
def _tf_predict(model_dir, input_csvlines):
    with tf.Graph().as_default(), tf.Session() as sess:
        input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
        csv_tensor_name = list(input_alias_map.values())[0]
        results = sess.run(fetches=output_alias_map,
                           feed_dict={csv_tensor_name: input_csvlines})
        if len(input_csvlines) == 1:
            for k, v in six.iteritems(results):
                if not isinstance(v, (list, np.ndarray)):
                    results[k] = [v]
        for k, v in six.iteritems(results):
            if any(isinstance(x, bytes) for x in v):
                results[k] = [x.decode('utf-8') for x in v]
    return results

Prediction with a tf savedmodel.

Args:
    model_dir: directory that contains a saved model
    input_csvlines: list of csv strings

Returns:
    Dict in the form tensor_name:prediction_list. Note that the value is
    always a list, even if there was only 1 row in input_csvlines.
codesearchnet
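A call sketch for the private `_tf_predict` helper above; the model directory is a placeholder, and the SavedModel is assumed to take raw CSV strings through a single input tensor:

predictions = _tf_predict('/tmp/exported_model', ['5.1,3.5,1.4,0.2',
                                                  '6.7,3.0,5.2,2.3'])
for tensor_name, values in predictions.items():
    print(tensor_name, values)  # every value is a list, one entry per CSV row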
def prune(self, cutoff: int = 2): for node_pair in tqdm(list(permutations(self.nodes(), 2))): paths = [ list(pairwise(path)) for path in nx.all_simple_paths(self, *node_pair, cutoff) ] if len(paths) > 1: for path in paths: if len(path) == 1: self.delete_edge(*path[0]) if any(self.degree(n) == 0 for n in path[0]): self.add_edge(*path[0]) break
Prunes the CAG by removing redundant paths. If there are multiple (directed)
paths between two nodes, this function removes all but the longest paths.
Subsequently, it restricts the graph to the largest connected component.

Args:
    cutoff: The maximum path length to consider for finding redundant
        paths. Higher values of this parameter correspond to more
        aggressive pruning.
juraj-google-style
def str_to_mac(mac_string):
    sp = mac_string.split(':')
    mac_string = ''.join(sp)
    return binascii.unhexlify(mac_string)

Convert a readable string to a MAC address.

Args:
    mac_string (str): a readable string (e.g. '01:02:03:04:05:06')

Returns:
    bytes: the MAC address in packed binary form
codesearchnet
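A quick, standard-library-only sketch of `str_to_mac` above:

import binascii

packed = str_to_mac('01:02:03:04:05:06')
print(packed)                    # b'\x01\x02\x03\x04\x05\x06'
print(binascii.hexlify(packed))  # b'010203040506'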
def add(self, *dic):
    dicList = list(flatten(dic))
    for d in dicList:
        di = []
        for k in d:
            di.append(Pair(k, IntegerSingle(d[k])))
        dictSingle = DictSingle(di)
        self._add([dictSingle], self.l)

Add a config to StartCalendarInterval.

Args:
    *dic (dict): dictionary with format {'Day': 12, 'Hour': 34}.
        Available keys are Month, Day, Weekday, Hour, Minute.
        *Note the uppercase.* You can use gen(), genMix() to generate
        complex config dictionaries.
codesearchnet
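A usage sketch for the `add` method above, assuming a `StartCalendarInterval` object from the surrounding plist-building helper (the class name follows the docstring; the schedule values are invented):

interval = StartCalendarInterval()
interval.add({'Day': 12, 'Hour': 34})                                # one entry
interval.add({'Weekday': 1, 'Hour': 9}, {'Weekday': 5, 'Hour': 17})  # several at once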
def hill_climb(nsteps, start_node, get_next_node):
    outputs = []
    best_score = -float('inf')
    for step in range(nsteps):
        next_node, score, output = get_next_node(copy.deepcopy(start_node))
        if score > best_score:
            start_node = copy.deepcopy(next_node)
            best_score = score
            outputs.append(output)
    return start_node, best_score, outputs

Modular hill climbing algorithm.

Example:
    >>> def get_next_node(node):
    ...     a, b = random.sample(range(len(node)), 2)
    ...     node[a], node[b] = node[b], node[a]
    ...     plaintext = decrypt(node, ciphertext)
    ...     score = lantern.score(plaintext, *fitness_functions)
    ...     return node, score, Decryption(plaintext, ''.join(node), score)
    >>> final_node, best_score, outputs = hill_climb(10, "ABC", get_next_node)

Args:
    nsteps (int): The number of neighbours to visit
    start_node: The starting node
    get_next_node (function): Function to return the next node, the score
        of the current node, and any optional output from the current node

Returns:
    The highest node found, the score of this node and the outputs from the
    best nodes along the way
codesearchnet
def get_input(self, name, ds): columns = self.inputs.get(name) df = ds.get_dataframe() for column in columns: if column not in df.columns: df[column] = self.defaults.get(column) return df[columns]
Retrieves the content of an input given a DataSource.

The input acts like a filter over the outputs of the DataSource.

Args:
    name (str): The name of the input.
    ds (openflow.DataSource): The DataSource that will feed the data.

Returns:
    pandas.DataFrame: The content of the input.
juraj-google-style
def print_projects(projects=None): grouped_by = {} if not projects: print( "Your selection didn't include any projects for this experiment.") return for name in projects: prj = projects[name] if prj.GROUP not in grouped_by: grouped_by[prj.GROUP] = [] grouped_by[prj.GROUP].append("{name}/{group}".format( name=prj.NAME, group=prj.GROUP)) for name in grouped_by: print("group: {0}".format(name)) group_projects = sorted(grouped_by[name]) for prj in group_projects: prj_cls = projects[prj] version_str = None if hasattr(prj_cls, 'versions'): version_str = ", ".join(prj_cls.versions()) project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP) project_str = \ " name: {id:<32} version: {version:<24} source: {src}".format( id=str(project_id), version=str(prj_cls.VERSION), src=str(prj_cls.SRC_FILE)) print(project_str) if prj_cls.__doc__: docstr = prj_cls.__doc__.strip("\n ") print(" description: {desc}".format(desc=docstr)) if version_str: print(" versions: {versions}".format(versions=version_str)) print()
Print a list of projects registered for that experiment.

Args:
    projects: The mapping of project names to project classes to print.
juraj-google-style
def _is_of_type(self, path, st_flag, follow_symlinks=True): path = make_string_path(path) if path is None: raise TypeError try: obj = self.resolve(path, follow_symlinks) if obj: self.raise_for_filepath_ending_with_separator( path, obj, macos_handling=not follow_symlinks) return S_IFMT(obj.st_mode) == st_flag except (IOError, OSError): return False return False
Helper function to implement isdir(), islink(), etc.

See the stat(2) man page for valid stat.S_I* flag values.

Args:
    path: Path to the file to stat and test.
    st_flag: The stat.S_I* flag checked for the file's st_mode.
    follow_symlinks: If True (the default), symlinks are resolved before
        the check.

Returns:
    (boolean) `True` if the st_flag is set in path's st_mode.

Raises:
    TypeError: if path is None.
juraj-google-style
def apply_fixup_array(bin_view, fx_offset, fx_count, entry_size): fx_array = bin_view[fx_offset:fx_offset+(2 * fx_count)] fx_len = fx_count - 1 sector_size = int(entry_size / fx_len) index = 1 position = (sector_size * index) - 2 while (position <= entry_size): if bin_view[position:position+2].tobytes() == fx_array[:2].tobytes(): bin_view[position:position+2] = fx_array[index * 2:(index * 2) + 2] else: _MOD_LOGGER.error("Error applying the fixup array") raise FixUpError(f"Signature {fx_array[:2].tobytes()} does not match {bin_view[position:position+2].tobytes()} at offset {position}.") index += 1 position = (sector_size * index) - 2 _MOD_LOGGER.info("Fix up array applied successfully.")
This function reads the fixup array and applies the correct values to the
underlying binary stream. This function changes the bin_view in memory.

Args:
    bin_view (memoryview of bytearray): The binary stream.
    fx_offset (int): Offset to the fixup array.
    fx_count (int): Number of elements in the fixup array.
    entry_size (int): Size of the MFT entry.
juraj-google-style
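A toy sketch exercising `apply_fixup_array` above on a fabricated 1024-byte entry made of two 512-byte sectors; the offsets, signature and replacement values are invented for illustration, and a stand-in logger is created because the function logs on success:

import logging

_MOD_LOGGER = logging.getLogger(__name__)  # stand-in for the module logger used above

entry = bytearray(1024)
entry[0x30:0x36] = b'\xAA\xAA\x11\x11\x22\x22'  # fixup array: signature + 2 original values
entry[510:512] = b'\xAA\xAA'                    # sector 1 ends with the signature
entry[1022:1024] = b'\xAA\xAA'                  # sector 2 ends with the signature

apply_fixup_array(memoryview(entry), fx_offset=0x30, fx_count=3, entry_size=1024)
assert entry[510:512] == b'\x11\x11' and entry[1022:1024] == b'\x22\x22'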
def build_exon(exon_info, build='37'):
    try:
        chrom = exon_info['chrom']
    except KeyError:
        raise KeyError('Exons has to have a chromosome')
    try:
        start = int(exon_info['start'])
    except KeyError:
        raise KeyError('Exon has to have a start')
    except TypeError:
        raise TypeError('Exon start has to be integer')
    try:
        end = int(exon_info['end'])
    except KeyError:
        raise KeyError('Exon has to have a end')
    except TypeError:
        raise TypeError('Exon end has to be integer')
    try:
        rank = int(exon_info['rank'])
    except KeyError:
        raise KeyError('Exon has to have a rank')
    except TypeError:
        raise TypeError('Exon rank has to be integer')
    try:
        exon_id = exon_info['exon_id']
    except KeyError:
        raise KeyError('Exons has to have a id')
    try:
        transcript = exon_info['transcript']
    except KeyError:
        raise KeyError('Exons has to have a transcript')
    try:
        hgnc_id = int(exon_info['hgnc_id'])
    except KeyError:
        raise KeyError('Exons has to have a hgnc_id')
    except TypeError:
        raise TypeError('hgnc_id has to be integer')

    exon_obj = Exon(
        exon_id=exon_id,
        chrom=chrom,
        start=start,
        end=end,
        rank=rank,
        transcript=transcript,
        hgnc_id=hgnc_id,
        build=build,
    )
    return exon_obj

Build an Exon object.

Args:
    exon_info(dict): Exon information

Returns:
    exon_obj(Exon)

    "exon_id": str,     # str(chrom-start-end)
    "chrom": str,
    "start": int,
    "end": int,
    "transcript": str,  # ENST ID
    "hgnc_id": int,     # HGNC id
    "rank": int,        # Order of exon in transcript
    "build": str,       # Genome build
codesearchnet
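A small call sketch for `build_exon` above; the coordinates and identifiers are made up, and the `Exon` model used inside the function is assumed to be importable:

exon_info = {
    'exon_id': '1-1167629-1170421',
    'chrom': '1',
    'start': 1167629,
    'end': 1170421,
    'rank': 1,
    'transcript': 'ENST00000379198',
    'hgnc_id': 17978,
}
exon_obj = build_exon(exon_info, build='37')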
def to_representation(self, instance): updated_course = copy.deepcopy(instance) enterprise_customer_catalog = self.context['enterprise_customer_catalog'] updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url( updated_course['key'] ) for course_run in updated_course['course_runs']: course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url( course_run['key'] ) return updated_course
Return the updated course data dictionary.

Arguments:
    instance (dict): The course data.

Returns:
    dict: The updated course data.
juraj-google-style
def FormatType(self, level_name, class_problist): class_problist.sort() output = [] for classname, problist in class_problist: output.append('<h4 class="issueHeader"><a name="%s%s">%s</a></h4><ul>\n' % (level_name, classname, UnCamelCase(classname))) for e in problist.problems: self.FormatException(e, output) if problist.dropped_count: output.append('<li>and %d more of this type.' % (problist.dropped_count)) output.append('</ul>\n') return ''.join(output)
Write the HTML dumping all problems of one type.

Args:
    level_name: string such as "Error" or "Warning"
    class_problist: sequence of tuples (class name, BoundedProblemList object)

Returns:
    HTML in a string
juraj-google-style
def restore(self, restored_tensors, unused_restored_shapes): with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])
Restores the associated tree ensemble from 'restored_tensors'.

Args:
    restored_tensors: the tensors that were loaded from a checkpoint.
    unused_restored_shapes: the shapes this object should conform to after
        restore. Not meaningful for trees.

Returns:
    The operation that restores the state of the tree ensemble variable.
github-repos
def evaluate_partition(self, direction, mechanism, purview, partition,
                       repertoire=None):
    if repertoire is None:
        repertoire = self.repertoire(direction, mechanism, purview)
    partitioned_repertoire = self.partitioned_repertoire(direction, partition)
    phi = repertoire_distance(direction, repertoire, partitioned_repertoire)
    return phi, partitioned_repertoire

Return the |small_phi| of a mechanism over a purview for the given
partition.

Args:
    direction (Direction): |CAUSE| or |EFFECT|.
    mechanism (tuple[int]): The nodes in the mechanism.
    purview (tuple[int]): The nodes in the purview.
    partition (Bipartition): The partition to evaluate.

Keyword Args:
    repertoire (np.array): The unpartitioned repertoire. If not supplied,
        it will be computed.

Returns:
    tuple[int, np.ndarray]: The distance between the unpartitioned and
    partitioned repertoires, and the partitioned repertoire.
codesearchnet
def merge_leading_dims(array_or_tensor, n_dims=2):
    tensor = tf.convert_to_tensor(array_or_tensor)
    tensor_shape_static = tensor.get_shape()
    if tensor_shape_static.dims is None:
        raise ValueError("Can't merge leading dimensions of tensor of unknown rank.")
    tensor_shape_list = tensor_shape_static.as_list()
    if n_dims > len(tensor_shape_list):
        return array_or_tensor
    if tensor_shape_static.is_fully_defined():
        new_shape = [np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:]
        return tf.reshape(tensor, new_shape)
    tensor_shape = tf.shape(tensor)
    new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keepdims=True)
    other_dims = tensor_shape[n_dims:]
    new_size = tf.concat([new_first_dim, other_dims], 0)
    result = tf.reshape(tensor, new_size)
    if all(value is not None for value in tensor_shape_list[:n_dims]):
        merged_leading_size = np.prod(tensor_shape_list[:n_dims])
    else:
        merged_leading_size = None
    result.set_shape([merged_leading_size] + tensor_shape_list[n_dims:])
    return result

Merge the first dimensions of a tensor.

Args:
    array_or_tensor: Tensor to have its first dimensions merged. Can also
        be an array or numerical value, which will be converted to a tensor
        for batch application, if needed.
    n_dims: Number of dimensions to merge.

Returns:
    Either the input value converted to a Tensor, with the requested
    dimensions merged, or the unmodified input value if the input has less
    than `n_dims` dimensions.

Raises:
    ValueError: If the rank of `array_or_tensor` is not well-defined.
codesearchnet
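A quick sketch of `merge_leading_dims` above under the TF 1.x-style API that the code already assumes; the shapes are illustrative:

import tensorflow as tf

x = tf.zeros([4, 3, 5])
merged = merge_leading_dims(x, n_dims=2)
print(merged.get_shape().as_list())  # [12, 5]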
def VerifyStructure(self, parser_mediator, lines):
    match_generator = self._VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)
    return bool(list(match_generator))

Verifies that this is a bash history file.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    lines (str): one or more lines from the text file.

Returns:
    bool: True if this is the correct parser, False otherwise.
juraj-google-style
def add(self, *value):
    flattenedValueList = list(flatten(value))
    return self._add(flattenedValueList, self.value)

Convert the value and add it to self.value.

Subclasses must override this method. A subclass is responsible for
creating whatever single instances it needs from its ``add(*value)`` and
for calling ``_add()`` to add them to ``self.value``.

Args:
    *value: the value(s) to be added
juraj-google-style