Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def get_or_create_hosted_zone(client, zone_name): zone_id = get_hosted_zone_by_name(client, zone_name) if zone_id: return zone_id logger.debug('Zone %s does not exist, creating.', zone_name) reference = uuid.uuid4().hex response = client.create_hosted_zone(Name=zone_name, CallerReference=reference) return parse_zone_id(response['HostedZone']['Id'])
Get the Id of an existing zone, or create it. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The Id of the Hosted Zone.
codesearchnet
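A minimal usage sketch for the record above, assuming the helpers (get_or_create_hosted_zone, get_hosted_zone_by_name, parse_zone_id) are importable as shown and AWS credentials are configured in the environment; the zone name and printed Id are illustrative only.

    import boto3

    client = boto3.client('route53')                              # standard boto3 Route53 client
    zone_id = get_or_create_hosted_zone(client, 'example.com.')   # creates the zone only if it is missing
    print(zone_id)                                                # e.g. 'Z1D633PJN98FT9' (made-up value)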
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None): end = pos if (category in ('snv', 'indel', 'cancer')): end = snvend elif (category == 'sv'): end = svend if (svend == pos): if svlen: end = (pos + svlen) if (':' in alt): match = BND_ALT_PATTERN.match(alt) if match: end = int(match.group(2)) return end
Return the end coordinate for a variant Args: pos(int) alt(str) category(str) snvend(str) svend(int) svlen(int) Returns: end(int)
codesearchnet
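A worked example for get_end above, written as plain asserts and assuming the function is imported as-is; the coordinates are made up.

    # SNV: the end coordinate is simply the provided snvend.
    assert get_end(pos=100, alt='A', category='snv', snvend=100) == 100
    # SV where svend equals pos: the end falls back to pos + svlen.
    assert get_end(pos=100, alt='<DEL>', category='sv', svend=100, svlen=500) == 600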
def __eq__(self, other): return isinstance(other, ArgumentPlaceholder)
Tests for equality of two placeholder objects. Args: other: Another placeholder object to compare to. This method is used only for test code. All placeholder objects are equal to each other.
github-repos
def __init__(self, endpoint_name, sagemaker_session=None): super(ChainerPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
Initialize a ``ChainerPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
juraj-google-style
def GetStorageMediaImageTypeIndicators(cls, path_spec, resolver_context=None): if ((cls._storage_media_image_remainder_list is None) or (cls._storage_media_image_store is None)): (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE) cls._storage_media_image_remainder_list = remainder_list cls._storage_media_image_store = specification_store if (cls._storage_media_image_scanner is None): cls._storage_media_image_scanner = cls._GetSignatureScanner(cls._storage_media_image_store) return cls._GetTypeIndicators(cls._storage_media_image_scanner, cls._storage_media_image_store, cls._storage_media_image_remainder_list, path_spec, resolver_context=resolver_context)
Determines if a file contains supported storage media image types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
codesearchnet
def offset(self, num_to_skip): return self.__class__( self._parent, projection=self._projection, field_filters=self._field_filters, orders=self._orders, limit=self._limit, offset=num_to_skip, start_at=self._start_at, end_at=self._end_at, )
Skip to an offset in a query. If the current query already has specified an offset, this will overwrite it. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query. Acts as a copy of the current query, modified with the newly added "offset" field.
juraj-google-style
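A hedged usage sketch for the offset method above with the google-cloud-firestore client; the collection and field names ('cities', 'population') are assumptions for illustration.

    from google.cloud import firestore

    db = firestore.Client()
    query = db.collection('cities').order_by('population').offset(10)  # skip the first 10 results
    for snapshot in query.stream():
        print(snapshot.id, snapshot.to_dict())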
def _ParseRecord(self, parser_mediator, text_file_object): try: title = text_file_object.readline() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to read and decode title') return False if not title: return False try: url = text_file_object.readline() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to read and decode url') return False try: timestamp = text_file_object.readline() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to read and decode timestamp') return False try: popularity_index = text_file_object.readline() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to read and decode popularity index') return False event_data = OperaGlobalHistoryEventData() event_data.url = url.strip() title = title.strip() if title != event_data.url: event_data.title = title popularity_index = popularity_index.strip() try: event_data.popularity_index = int(popularity_index, 10) except ValueError: parser_mediator.ProduceExtractionWarning( 'unable to convert popularity index: {0:s}'.format(popularity_index)) if event_data.popularity_index < 0: event_data.description = 'First and Only Visit' else: event_data.description = 'Last Visit' timestamp = timestamp.strip() try: timestamp = int(timestamp, 10) except ValueError: parser_mediator.ProduceExtractionWarning( 'unable to convert timestamp: {0:s}'.format(timestamp)) timestamp = None if timestamp is None: date_time = dfdatetime_semantic_time.SemanticTime('Invalid') else: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) return True
Parses an Opera global history record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. text_file_object (dfvfs.TextFile): text file. Returns: bool: True if the record was successfully parsed.
juraj-google-style
def create_lb_with_nat_pool(access_token, subscription_id, resource_group, lb_name, public_ip_id, fe_start_port, fe_end_port, backend_port, location): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '?api-version=', NETWORK_API]) lb_body = {'location': location} frontendipcconfig = {'name': 'LoadBalancerFrontEnd'} fipc_properties = {'publicIPAddress': {'id': public_ip_id}} frontendipcconfig['properties'] = fipc_properties properties = {'frontendIPConfigurations': [frontendipcconfig]} properties['backendAddressPools'] = [{'name': 'bepool'}] inbound_natpool = {'name': 'natpool'} lbfe_id = (((((('/subscriptions/' + subscription_id) + '/resourceGroups/') + resource_group) + '/providers/Microsoft.Network/loadBalancers/') + lb_name) + '/frontendIPConfigurations/LoadBalancerFrontEnd') ibnp_properties = {'frontendIPConfiguration': {'id': lbfe_id}} ibnp_properties['protocol'] = 'tcp' ibnp_properties['frontendPortRangeStart'] = fe_start_port ibnp_properties['frontendPortRangeEnd'] = fe_end_port ibnp_properties['backendPort'] = backend_port inbound_natpool['properties'] = ibnp_properties properties['inboundNatPools'] = [inbound_natpool] lb_body['properties'] = properties body = json.dumps(lb_body) return do_put(endpoint, body, access_token)
Create a load balancer with inbound NAT pools. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. lb_name (str): Name of the new load balancer. public_ip_id (str): Public IP address resource id. fe_start_port (int): Start of front-end port range. fe_end_port (int): End of front-end port range. backend_port (int): Back end port for VMs. location (str): Azure data center location. E.g. westus. Returns: HTTP response. Load Balancer JSON body.
codesearchnet
def on_train_end(self, logs=None):
Called at the end of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_epoch_end()` is passed to this argument for this method but that may change in the future.
github-repos
def get_nondebug_quantized_model(self) -> bytes: return self._get_quantized_model(is_debug=False)
Returns a non-instrumented quantized model. Convert the quantized model with the initialized converter and return bytes for nondebug model. The model will not be instrumented with numeric verification operations. Returns: Model bytes corresponding to the model. Raises: ValueError: if converter is not passed to the debugger.
github-repos
def __format__(self, format_spec=None): if not format_spec: return str(self) elif format_spec == 'url': return self.to_url() elif format_spec.startswith('url:'): parts = format_spec.split(':')[1:] site = parts[0] if len(parts) > 1: country = parts[1] else: country = 'us' return self.to_url(site, country) elif format_spec == 'urn': return self.to_urn() else: raise ValueError('Unknown format_spec %r' % format_spec)
Extended pretty printing for ISBN strings. Args: format_spec (str): Extended format to use Returns: ``str``: Human readable string representation of ``Isbn`` object Raises: ValueError: Unknown value for ``format_spec``
juraj-google-style
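A usage sketch for __format__ above, assuming an Isbn class of the kind this method belongs to (pyisbn-style) is importable; the ISBN value is just an example.

    isbn = Isbn('9780521871723')
    print('{0}'.format(isbn))                # plain string form
    print('{0:url}'.format(isbn))            # default purchase URL
    print('{0:url:amazon:uk}'.format(isbn))  # site and country variants, per the parsing above
    print('{0:urn}'.format(isbn))            # URN form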
def CreateSitelinkFeedItem(feed_items, feed_item_id): site_link_from_feed = feed_items[feed_item_id] site_link_feed_item = {'sitelinkText': site_link_from_feed['text'], 'sitelinkLine2': site_link_from_feed['line2'], 'sitelinkLine3': site_link_from_feed['line3']} if (('finalUrls' in site_link_from_feed) and site_link_from_feed['finalUrls']): site_link_feed_item['sitelinkFinalUrls'] = {'urls': site_link_from_feed['finalUrls']} if ('finalMobileUrls' in site_link_from_feed): site_link_feed_item['sitelinkFinalMobileUrls'] = {'urls': site_link_from_feed['finalMobileUrls']} site_link_feed_item['sitelinkTrackingUrlTemplate'] = site_link_from_feed['trackingUrlTemplate'] else: site_link_feed_item['sitelinkUrl'] = site_link_from_feed['url'] return site_link_feed_item
Creates a Sitelink Feed Item. Args: feed_items: a list of all Feed Items. feed_item_id: the Id of a specific Feed Item for which a Sitelink Feed Item should be created. Returns: The new Sitelink Feed Item.
codesearchnet
async def is_try_or_pull_request(self): tasks = [asyncio.ensure_future(link.is_try_or_pull_request()) for link in self.links] tasks.insert(0, asyncio.ensure_future(is_try_or_pull_request(self.context, self.task))) conditions = (await raise_future_exceptions(tasks)) return any(conditions)
Determine if any task in the chain is a try task or a pull request. Returns: bool: True if any task is a try task or a pull request.
codesearchnet
def memory_write8(self, addr, data, zone=None): return self.memory_write(addr, data, zone, 8)
Writes bytes to memory of a target system. Args: self (JLink): the ``JLink`` instance addr (int): start address to write to data (list): list of bytes to write zone (str): optional memory zone to access Returns: Number of bytes written to target. Raises: JLinkException: on memory access error.
juraj-google-style
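A usage sketch for memory_write8 above with the pylink-square package; the target device name and address are placeholders.

    import pylink

    jlink = pylink.JLink()
    jlink.open()
    jlink.connect('STM32F407VE')                               # hypothetical target device
    written = jlink.memory_write8(0x20000000, [0xDE, 0xAD, 0xBE, 0xEF])
    print('wrote %d bytes' % written)
    jlink.close()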
def resize(self, image: np.ndarray, size: Dict[str, int], anti_aliasing: bool=True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: requires_backends(self, 'scipy') output_shape = (size['height'], size['width']) image = to_channel_dimension_format(image, ChannelDimension.LAST) image, output_shape = _preprocess_resize_output_shape(image, output_shape) input_shape = image.shape factors = np.divide(input_shape, output_shape) ndi_mode = 'mirror' cval = 0 order = 1 if anti_aliasing: if anti_aliasing_sigma is None: anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) else: anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors) if np.any(anti_aliasing_sigma < 0): raise ValueError('Anti-aliasing standard deviation must be greater than or equal to zero') elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): warnings.warn('Anti-aliasing standard deviation greater than zero but not down-sampling along all axes') filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode) else: filtered = image zoom_factors = [1 / f for f in factors] out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True) image = _clip_warp_output(image, out) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image
Resize an image as per the original implementation. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary containing the height and width to resize the image to. anti_aliasing (`bool`, *optional*, defaults to `True`): Whether to apply anti-aliasing when downsampling the image. anti_aliasing_sigma (`float`, *optional*, defaults to `None`): Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated automatically. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image.
github-repos
def _detect(self): results = [] for c in self.slither.contracts_derived: ret = self.detect_uninitialized(c) for (variable, functions) in ret: info = '{}.{} ({}) is never initialized. It is used in:\n' info = info.format(variable.contract.name, variable.name, variable.source_mapping_str) for f in functions: info += '\t- {} ({})\n'.format(f.name, f.source_mapping_str) source = [variable.source_mapping] source += [f.source_mapping for f in functions] json = self.generate_json_result(info) self.add_variable_to_json(variable, json) self.add_functions_to_json(functions, json) results.append(json) return results
Detect uninitialized state variables Recursively visit the calls Returns: dict: [contract name] = set(state variable uninitialized)
codesearchnet
def add_children(self, children): self._children += [c for c in children if c not in self._children]
Adds new child nodes after filtering out duplicates Args: children (list): list of OmniTree nodes to add as children
juraj-google-style
def fail_steamid(channel): gui = ui_embed.UI(channel, "That SteamID doesn't exist.", 'You can get your SteamID by going to your profile page and looking at the url, or you can set a custom ID by going to edit profile on your profile page.', modulename=modulename, colour=35071) return gui
Creates an embed UI for invalid SteamIDs Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object
codesearchnet
def __init__(self, power=0., validate_args=False, name="power_transform"): self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init"): power = tf.get_static_value( tf.convert_to_tensor(value=power, name="power")) if power is None or power < 0: raise ValueError("`power` must be a non-negative TF constant.") self._power = power super(PowerTransform, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name)
Instantiates the `PowerTransform` bijector. Args: power: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: if `power < 0` or is not known statically.
juraj-google-style
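A plain-NumPy sketch of the forward map Y = (1 + c*X)**(1/c) described in the docstring above, independent of the TFP bijector class itself; it also shows the c -> 0 limit, which is exp(X).

    import numpy as np

    def power_forward(x, c):
        if c == 0.0:                       # limit of the transform as c -> 0
            return np.exp(x)
        return (1.0 + x * c) ** (1.0 / c)

    x = np.array([0.0, 0.5, 1.0])
    print(power_forward(x, 0.0))           # [1.         1.64872127 2.71828183]
    print(power_forward(x, 0.5))           # [1.         1.5625     2.25      ]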
def unkown_field(self, value=None): if (value is not None): try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str for field `unkown_field`'.format(value)) if (',' in value): raise ValueError('value should not contain a comma for field `unkown_field`') self._unkown_field = value
Corresponds to IDD Field `unkown_field` Empty field in data. Args: value (str): value for IDD Field `unkown_field` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def sysmeta_add_preferred(sysmeta_pyxb, node_urn): if (not has_replication_policy(sysmeta_pyxb)): sysmeta_set_default_rp(sysmeta_pyxb) rp_pyxb = sysmeta_pyxb.replicationPolicy _add_node(rp_pyxb, 'pref', node_urn) _remove_node(rp_pyxb, 'block', node_urn)
Add a remote Member Node to the list of preferred replication targets to this System Metadata object. Also remove the target MN from the list of blocked Member Nodes if present. If the target MN is already in the preferred list and not in the blocked list, this function is a no-op. Args: sysmeta_pyxb : SystemMetadata PyXB object. System Metadata in which to add the preferred replication target. If the System Metadata does not already have a Replication Policy, a default replication policy which enables replication is added and populated with the preferred replication target. node_urn : str Node URN of the remote MN that will be added. On the form ``urn:node:MyMemberNode``.
codesearchnet
def _SetYaraRules(self, yara_rules_string): if (not yara_rules_string): return analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance('yara') analyzer_object.SetRules(yara_rules_string) self._analyzers.append(analyzer_object)
Sets the Yara rules. Args: yara_rules_string (str): unparsed Yara rule definitions.
codesearchnet
def get_cn_dict(self, structure, n, use_weights=False): siw = self.get_nn_info(structure, n) cn_dict = {} for i in siw: site_element = i['site'].species_string if (site_element not in cn_dict): if use_weights: cn_dict[site_element] = i['weight'] else: cn_dict[site_element] = 1 elif use_weights: cn_dict[site_element] += i['weight'] else: cn_dict[site_element] += 1 return cn_dict
Get coordination number, CN, of each element bonded to site with index n in structure Args: structure (Structure): input structure n (integer): index of site for which to determine CN. use_weights (boolean): flag indicating whether (True) to use weights for computing the coordination number or not (False, default: each coordinated site has equal weight). Returns: cn (dict): dictionary of CN of each element bonded to site
codesearchnet
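A hedged usage sketch for get_cn_dict above with pymatgen; a rock-salt NaCl structure is built inline so the example is self-contained, and MinimumDistanceNN stands in for whichever near-neighbor class is actually used.

    from pymatgen.core import Lattice, Structure
    from pymatgen.analysis.local_env import MinimumDistanceNN

    structure = Structure.from_spacegroup(
        'Fm-3m', Lattice.cubic(5.69), ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
    nn = MinimumDistanceNN()
    print(nn.get_cn_dict(structure, 0))    # expected along the lines of {'Cl': 6}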
def _get_dependent_variables(input_ops, output_ops): output_ops = nest.map_structure(gen_array_ops.identity, output_ops) inbetween_ops = op_selector.get_backward_walk_ops(seed_ops=output_ops, stop_at_ts=input_ops, inclusive=False, only_differentiable=True) var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES) var_names = (op.name for op in var_ops) tf_vars = (get_variable_by_name(var_name) for var_name in var_names) tf_vars = [v for v in tf_vars if v is not None] return tf_vars
Finds variables involved in the subgraph between input_ops and output_ops. Args: input_ops: Flattened list of input ops output_ops: Flattened list of output ops Returns: A list of variables
github-repos
def parse_GDS(filepath): dataset_lines = [] subsets = {} database = None dataset_name = None with utils.smart_open(filepath) as soft: groupper = groupby(soft, lambda x: x.startswith("^")) for is_new_entry, group in groupper: if is_new_entry: entry_type, entry_name = __parse_entry(next(group)) logger.debug("%s: %s" % (entry_type.upper(), entry_name)) if entry_type == "SUBSET": is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message subset_metadata = parse_metadata(data_group) subsets[entry_name] = GDSSubset(name=entry_name, metadata=subset_metadata) elif entry_type == "DATABASE": is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message database_metadata = parse_metadata(data_group) database = GEODatabase(name=entry_name, metadata=database_metadata) elif entry_type == "DATASET": is_data, data_group = next(groupper) dataset_name = entry_name for line in data_group: dataset_lines.append(line.rstrip()) else: logger.error("Cannot recognize type %s" % entry_type) metadata = parse_metadata(dataset_lines) columns = parse_GDS_columns(dataset_lines, subsets) table = parse_table_data(dataset_lines) return GDS(name=dataset_name, metadata=metadata, columns=columns, table=table, subsets=subsets, database=database)
Parse GDS SOFT file. Args: filepath (:obj:`str`): Path to GDS SOFT file. Returns: :obj:`GEOparse.GDS`: A GDS object.
juraj-google-style
def convert_squeeze(params, w_name, scope_name, inputs, layers, weights, names): print('Converting squeeze ...') if (len(params['axes']) > 1): raise AssertionError('Cannot convert squeeze by multiple dimensions') def target_layer(x, axis=int(params['axes'][0])): import tensorflow as tf return tf.squeeze(x, axis=axis) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert squeeze operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def script_dir(pyobject, follow_symlinks=True): if getattr(sys, 'frozen', False): path = abspath(sys.executable) else: path = inspect.getabsfile(pyobject) if follow_symlinks: path = realpath(path) return dirname(path)
Get current script's directory Args: pyobject (Any): Any Python object in the script follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True. Returns: str: Current script's directory
juraj-google-style
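A usage sketch for script_dir above; any object defined in the current script works as the anchor.

    def main():
        print('script lives in:', script_dir(main))

    if __name__ == '__main__':
        main()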
def __init__(self, expr, weld_type, df=None, column_name=None, index_type=None, index_name=None): self.expr = expr self.weld_type = weld_type self.dim = 1 self.df = df self.column_name = column_name self.index_type = index_type self.index_name = index_name
Summary TODO: Implement an actual Index Object like how Pandas does Args: expr (TYPE): Description weld_type (TYPE): Description df (None, optional): Description column_name (None, optional): Description
juraj-google-style
def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname): fig = figure.Figure(figsize=(6, 6)) canvas = backend_agg.FigureCanvasAgg(fig) ax = fig.add_subplot(1, 1, 1) ax.scatter(features[:, 0], features[:, 1], c=np.float32(labels[:, 0]), cmap=cm.get_cmap('binary'), edgecolors='k') def plot_weights(w, b, **kwargs): (w1, w2) = w x1s = np.linspace((- 1), 1, 100) x2s = ((- ((w1 * x1s) + b)) / w2) ax.plot(x1s, x2s, **kwargs) for (w, b) in candidate_w_bs: plot_weights(w, b, alpha=(1.0 / np.sqrt(len(candidate_w_bs))), lw=1, color='blue') if (true_w_b is not None): plot_weights(*true_w_b, lw=4, color='green', label='true separator') ax.set_xlim([(- 1.5), 1.5]) ax.set_ylim([(- 1.5), 1.5]) ax.legend() canvas.print_figure(fname, format='png') print('saved {}'.format(fname))
Utility method to visualize decision boundaries in R^2. Args: features: Input points, as a Numpy `array` of shape `[num_examples, 2]`. labels: Numpy `float`-like array of shape `[num_examples, 1]` giving a label for each point. true_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of shape `[2]` and `b` is a scalar `float`, interpreted as a decision rule of the form `dot(features, w) + b > 0`. candidate_w_bs: Python `iterable` containing tuples of the same form as true_w_b. fname: The filename to save the plot as a PNG image (Python `str`).
codesearchnet
def get_percentile_to_value_dict(self, percentile_list): result = {} total = 0 percentile_list_index = 0 count_at_percentile = 0 percentile_list = list(set(percentile_list)) percentile_list.sort() for index in range(self.counts_len): total += self.get_count_at_index(index) while True: if (not count_at_percentile): if (percentile_list_index == len(percentile_list)): return result percentile = percentile_list[percentile_list_index] percentile_list_index += 1 if (percentile > 100): return result count_at_percentile = self.get_target_count_at_percentile(percentile) if (total >= count_at_percentile): value_at_index = self.get_value_from_index(index) if percentile: result[percentile] = self.get_highest_equivalent_value(value_at_index) else: result[percentile] = self.get_lowest_equivalent_value(value_at_index) count_at_percentile = 0 else: break return result
A faster alternative to query values for a list of percentiles. Args: percentile_list: a list of percentiles in any order, dups will be ignored each element in the list must be a float value in [0.0 .. 100.0] Returns: a dict of percentile values indexed by the percentile
codesearchnet
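A hedged usage sketch for get_percentile_to_value_dict above with the hdrhistogram package (module name hdrh); the recorded values are arbitrary latencies in milliseconds.

    from hdrh.histogram import HdrHistogram

    hist = HdrHistogram(1, 60 * 60 * 1000, 2)   # lowest/highest trackable value, significant figures
    for value in (10, 12, 45, 120, 870):
        hist.record_value(value)
    print(hist.get_percentile_to_value_dict([50.0, 90.0, 99.0]))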
def find_clients(self, hosts): clients = [] for host in hosts: clients.append(self._get_client_by_hostname(host)) return [client for client in clients if (client is not None)]
Finds GRR clients given a list of hosts. Args: hosts: List of hostname FQDNs Returns: List of GRR client objects.
codesearchnet
def crud_handler(Model, name=None, **kwds): from nautilus.network.events import combine_action_handlers from . import update_handler, create_handler, delete_handler, read_handler return combine_action_handlers( create_handler(Model, name=name), read_handler(Model, name=name), update_handler(Model, name=name), delete_handler(Model, name=name), )
This action handler factory returns an action handler that responds to actions with CRUD types (following nautilus conventions) and performs the necessary mutation on the model's database. Args: Model (nautilus.BaseModel): The model to delete when the action is received. Returns: function(type, payload): The action handler for this model
juraj-google-style
def _AssertGrayscaleImage(image): return control_flow_ops.with_dependencies(_CheckGrayscaleImage(image, require_static=False), image)
Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.
github-repos
def format_diff_pyxb(a_pyxb, b_pyxb): return '\n'.join( difflib.ndiff( serialize_to_xml_str(a_pyxb).splitlines(), serialize_to_xml_str(b_pyxb).splitlines(), ) )
Create a diff between two PyXB objects. Args: a_pyxb: PyXB object b_pyxb: PyXB object Returns: str : `Differ`-style delta
juraj-google-style
def __init__(self, source_geo_def, target_geo_def): self.source_geo_def = source_geo_def self.target_geo_def = target_geo_def
Initialize resampler with geolocation information. Args: source_geo_def (SwathDefinition, AreaDefinition): Geolocation definition for the data to be resampled target_geo_def (CoordinateDefinition, AreaDefinition): Geolocation definition for the area to resample data to.
juraj-google-style
def delay_response(delay): delay = min(float(delay), 10) time.sleep(delay) return jsonify( get_dict("url", "args", "form", "data", "origin", "headers", "files") )
Returns a delayed response (max of 10 seconds). --- tags: - Dynamic data parameters: - in: path name: delay type: int produces: - application/json responses: 200: description: A delayed response.
juraj-google-style
def restore(self, output): pass
Create an accumulator based on 'output'. This method creates a new accumulator with identical internal state to the one used to create the data in 'output'. This means that if you do output_data = combiner.extract(accumulator_1) accumulator_2 = combiner.restore(output_data) then accumulator_1 and accumulator_2 will have identical internal state, and computations using either of them will be equivalent. Args: output: The data output from a previous computation. Should be in the same form as provided by 'extract_output'. Returns: A new accumulator.
github-repos
def _GetAuthCookie(self, auth_token): continue_location = "http: args = {"continue": continue_location, "auth": auth_token} req = self._CreateRequest("https: try: response = self.opener.open(req) except urllib2.HTTPError, e: response = e if (response.code != 302 or response.info()["location"] != continue_location): raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp) self.authenticated = True
Fetches authentication cookies for an authentication token. Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies.
juraj-google-style
def __init__(self, fetches): if isinstance(fetches, wrapt.ObjectProxy): self._fetch_type = type(fetches.__wrapped__) else: self._fetch_type = type(fetches) self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches] self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
Creates a _ListFetchMapper. Args: fetches: List, tuple, or namedtuple of fetches.
github-repos
def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse: kwargs.update({"channel": channel}) return self.api_call("mpim.close", json=kwargs)
Closes a multiparty direct message channel. Args: channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'
juraj-google-style
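A usage sketch for mpim_close above with the slackclient v2 WebClient it belongs to; the token environment variable and channel id are placeholders.

    import os
    import slack

    client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
    response = client.mpim_close(channel='G1234567890')
    print(response['ok'])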
def __eq__(self, other): if not isinstance(other, LocationDescriptor): return False nbr_of_sub_locations = self.nbr_of_sub_locations() if nbr_of_sub_locations != other.nbr_of_sub_locations(): return False for i in range(nbr_of_sub_locations): if self._locations_list[i] != other._locations_list[i]: return False return True
Detect if another object is equal to this :class:`LocationDescriptor` object. Args: other: object to test.
juraj-google-style
def get_by_resource(self, resource_uri): uri = (((self.URI + self.RESOURCES_PATH) + '/') + resource_uri) return self._client.get(id_or_uri=uri)
Gets all the labels for the specified resource Args: resource_uri: The resource URI Returns: dict: Resource Labels
codesearchnet
def find(self, collection, query): obj = getattr(self.db, collection) result = obj.find(query) return result
Search a collection for the query provided. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results.
juraj-google-style
def process(self, batch, *args, **kwargs): if self.postprocessing is not None: batch = self.postprocessing(batch) return batch
Process a list of examples to create a batch. Postprocess the batch with user-provided Pipeline. Args: batch (list(object)): A list of object from a batch of examples. Returns: object: Processed object given the input and custom postprocessing Pipeline.
juraj-google-style
def Shell(device, *command): if command: return device.StreamingShell(' '.join(command)) else: terminal_prompt = device.InteractiveShell() print(terminal_prompt.decode('utf-8')) while True: cmd = input('> ') if (not cmd): continue elif (cmd == 'exit'): break else: stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True) if stdout: if isinstance(stdout, bytes): stdout = stdout.decode('utf-8') print(stdout) device.Close()
Runs a command on the device and prints the stdout. Args: command: Command to run on the target.
codesearchnet
def _summary_iterator(test_dir): event_paths = sorted(glob.glob(os.path.join(test_dir, 'event*'))) return summary_iterator.summary_iterator(event_paths[-1])
Reads events from test_dir/events. Args: test_dir: Name of the test directory. Returns: A summary_iterator
github-repos
def __init__(self, details, _class): if not isinstance(details, dict): raise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict') self.validation_failures = {} self._class = _class self._optional = False if '__optional__' in details: if isinstance(details['__optional__'], bool): self._optional = details['__optional__'] else: sys.stderr.write('"' + str(details['__optional__']) + '" is not a valid value for __optional__, assuming false') del details['__optional__'] self._special = {} for k in (tuple(details.keys())): oMatch = _specialKey.match(k) if oMatch: self._special[oMatch.group(1)] = details[k] del details[k]
Constructor Initialises the instance Arguments: details {dict} -- Details describing the type of values allowed for the node _class {str} -- The class of the child Raises: ValueError Returns: _BaseNode
juraj-google-style
def _ScheduleTask(self, task): if self._processing_profiler: self._processing_profiler.StartTiming('schedule_task') try: self._task_queue.PushItem(task, block=False) is_scheduled = True except errors.QueueFull: is_scheduled = False if self._processing_profiler: self._processing_profiler.StopTiming('schedule_task') return is_scheduled
Schedules a task. Args: task (Task): task. Returns: bool: True if the task was scheduled.
codesearchnet
def read_into(self, buffer, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1', write_offset=0) -> None: if (type(buffer) is Buffer): buffer = buffer.mglo return self.mglo.read_into(buffer, viewport, components, attachment, alignment, dtype, write_offset)
Read the content of the framebuffer into a buffer. Args: buffer (bytearray): The buffer that will receive the pixels. viewport (tuple): The viewport. components (int): The number of components to read. Keyword Args: attachment (int): The color attachment. alignment (int): The byte alignment of the pixels. dtype (str): Data type. write_offset (int): The write offset.
codesearchnet
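A hedged usage sketch for read_into above with moderngl; a standalone context and a small framebuffer are created just to have something to read from.

    import moderngl

    ctx = moderngl.create_standalone_context()
    fbo = ctx.simple_framebuffer((64, 64))
    fbo.use()
    fbo.clear(1.0, 0.0, 0.0)                 # solid red
    pixels = bytearray(64 * 64 * 3)          # 3 components, 1 byte each
    fbo.read_into(pixels, components=3)
    print(pixels[:3])                        # bytearray(b'\xff\x00\x00')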
def get_graph(self, run_key, device_name, debug=False): return self.get_graphs(run_key, debug=debug).get(device_name, None)
Get the runtime GraphDef proto associated with a run key and a device. Args: run_key: A Session.run key. device_name: Name of the device in question. debug: Whether the debugger-decorated graph is to be retrieved. Returns: A `GraphDef` proto.
juraj-google-style
def check_plugin(self, plugin): vcf_section = self[plugin] try: vcf_field = vcf_section['field'] if not vcf_field in self.vcf_columns: raise ValidateError( "field has to be in {0}\n" "Wrong field name in plugin: {1}".format( self.vcf_columns, plugin )) if vcf_field == 'INFO': try: info_key = vcf_section['info_key'] if info_key == 'CSQ': try: csq_key = vcf_section['csq_key'] except KeyError: raise ValidateError( "CSQ entrys has to refer to an csq field.\n" "Refer with keyword 'csq_key'\n" "csq_key is missing in section: {0}".format( plugin ) ) except KeyError: raise ValidateError( "INFO entrys has to refer to an INFO field.\n" "Refer with keyword 'info_key'\n" "info_key is missing in section: {0}".format( plugin ) ) except KeyError: raise ValidateError( "Vcf entrys have to refer to a field in the VCF with keyword" " 'field'.\nMissing keyword 'field' in plugin: {0}".format( plugin )) try: data_type = vcf_section['data_type'] if not data_type in self.data_types: raise ValidateError( "data_type has to be in {0}\n" "Wrong data_type in plugin: {1}".format( self.data_types, plugin) ) except KeyError: raise ValidateError( "Vcf entrys have to refer to a data type in the VCF with " "keyword 'data_type'.\n" "Missing data_type in plugin: {0}".format(plugin) ) separators = vcf_section.get('separators', None) if separators: if len(separators) == 1: self[plugin]['separators'] = list(separators) else: if data_type != 'flag': raise ValidateError( "If data_type != flag the separators have to be defined" "Missing separators in plugin: {0}".format(plugin) ) record_rule = vcf_section.get('record_rule', None) if record_rule: if not record_rule in ['min', 'max']: raise ValidateError( "Record rules have to be in {0}\n" "Wrong record_rule in plugin: {1}".format( ['min', 'max'], plugin) ) else: self.logger.info("Setting record rule to default: 'max'") return True
Check if the section is in the proper format vcf format. Args: vcf_section (dict): The information from a vcf section Returns: True is it is in the proper format
juraj-google-style
def _emit_tensor_snapshot(self, tensor: _TensorTracker, timestamp: int, pid: int, tid: int, value: step_stats_pb2.NodeOutput) -> None: desc = str(value.tensor_description).replace('"', '') snapshot = {'tensor_description': desc} self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot)
Generate Chrome Trace snapshot event for a computed Tensor. Args: tensor: A 'TensorTracker' object. timestamp: The timestamp of this snapshot as a long integer. pid: The pid assigned for showing the device where this op ran. tid: The tid of the thread computing the tensor snapshot. value: A JSON-compliant snapshot of the object.
github-repos
def sysprep(disk, distro, loader=None, backend='direct', **kwargs): if (loader is None): loader = PackageLoader('lago', 'templates') sysprep_file = _render_template(distro, loader=loader, **kwargs) cmd = ['virt-sysprep', '-a', disk] cmd.extend(['--commands-from-file', sysprep_file]) env = os.environ.copy() if ('LIBGUESTFS_BACKEND' not in env): env['LIBGUESTFS_BACKEND'] = backend ret = utils.run_command(cmd, env=env) if ret: raise RuntimeError(('Failed to bootstrap %s\ncommand:%s\nstdout:%s\nstderr:%s' % (disk, ' '.join((('"%s"' % elem) for elem in cmd)), ret.out, ret.err)))
Run virt-sysprep on the ``disk``, commands are built from the distro specific template and arguments passed in ``kwargs``. If no template is available it will default to ``sysprep-base.j2``. Args: disk(str): path to disk distro(str): distro to render template for loader(jinja2.BaseLoader): Jinja2 template loader, if not passed, will search Lago's package. backend(str): libguestfs backend to use **kwargs(dict): environment variables for Jinja2 template Returns: None Raises: RuntimeError: On virt-sysprep none 0 exit code.
codesearchnet
def save_output(results, output_directory="output"): aggregate_reports = results["aggregate_reports"] forensic_reports = results["forensic_reports"] if os.path.exists(output_directory): if not os.path.isdir(output_directory): raise ValueError("{0} is not a directory".format(output_directory)) else: os.makedirs(output_directory) with open("{0}".format(os.path.join(output_directory, "aggregate.json")), "w", newline="\n", encoding="utf-8") as agg_json: agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False, indent=2)) with open("{0}".format(os.path.join(output_directory, "aggregate.csv")), "w", newline="\n", encoding="utf-8") as agg_csv: csv = parsed_aggregate_reports_to_csv(aggregate_reports) agg_csv.write(csv) with open("{0}".format(os.path.join(output_directory, "forensic.json")), "w", newline="\n", encoding="utf-8") as for_json: for_json.write(json.dumps(forensic_reports, ensure_ascii=False, indent=2)) with open("{0}".format(os.path.join(output_directory, "forensic.csv")), "w", newline="\n", encoding="utf-8") as for_csv: csv = parsed_forensic_reports_to_csv(forensic_reports) for_csv.write(csv) samples_directory = os.path.join(output_directory, "samples") if not os.path.exists(samples_directory): os.makedirs(samples_directory) sample_filenames = [] for forensic_report in forensic_reports: sample = forensic_report["sample"] message_count = 0 parsed_sample = forensic_report["parsed_sample"] subject = parsed_sample["filename_safe_subject"] filename = subject while filename in sample_filenames: message_count += 1 filename = "{0} ({1})".format(subject, message_count) sample_filenames.append(filename) filename = "{0}.eml".format(filename) path = os.path.join(samples_directory, filename) with open(path, "w", newline="\n", encoding="utf-8") as sample_file: sample_file.write(sample)
Save report data in the given directory Args: results (OrderedDict): Parsing results output_directory: The patch to the directory to save in
juraj-google-style
def _ReadEventDataIntoEvent(self, event): if self._storage_type != definitions.STORAGE_TYPE_SESSION: return event_data_identifier = event.GetEventDataIdentifier() if event_data_identifier: lookup_key = event_data_identifier.CopyToString() event_data = self._event_data[lookup_key] for attribute_name, attribute_value in event_data.GetAttributes(): setattr(event, attribute_name, attribute_value)
Reads the data into the event. This function is intended to offer backwards compatible event behavior. Args: event (EventObject): event.
juraj-google-style
def report(self, verbose=1): lines = [] if (verbose >= 2): lines.append(self._status_line(tense='past')) if (verbose >= 3): (unit, mag) = _choose_unit(self.total_time, self.unit, self._asciimode) lines.append(' body took: {total:.{pr}{t}} {unit}'.format(total=(self.total_time / mag), t=self._precision_type, pr=self._precision, unit=unit)) lines.append(' time per loop: {}'.format(self._seconds_str())) else: line = ('Timed ' + self._seconds_str()) if self.label: line += (' for ' + self.label) lines.append(line) text = '\n'.join(lines) return text
Creates a human readable report Args: verbose (int): verbosity level. Either 1, 2, or 3. Returns: str: the report SeeAlso: timerit.Timerit.print Example: >>> import math >>> ti = Timerit(num=1).call(math.factorial, 5) >>> print(ti.report(verbose=1)) Timed best=...s, mean=...s
codesearchnet
def GetSortedEvents(self, time_range=None): filter_expression = None if time_range: filter_expression = [] if time_range.start_timestamp: filter_expression.append( '_timestamp >= {0:d}'.format(time_range.start_timestamp)) if time_range.end_timestamp: filter_expression.append( '_timestamp <= {0:d}'.format(time_range.end_timestamp)) filter_expression = ' AND '.join(filter_expression) event_generator = self._GetAttributeContainers( self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression, order_by='_timestamp') for event in event_generator: if hasattr(event, 'event_data_row_identifier'): event_data_identifier = identifiers.SQLTableIdentifier( 'event_data', event.event_data_row_identifier) event.SetEventDataIdentifier(event_data_identifier) del event.event_data_row_identifier yield event
Retrieves the events in increasing chronological order. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Yield: EventObject: event.
juraj-google-style
class RandomNormal(RandomInitializer): def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev super().__init__(seed=seed) def __call__(self, shape, dtype=None): return random.normal(shape=shape, mean=self.mean, stddev=self.stddev, seed=self.seed, dtype=dtype) def get_config(self): base_config = super().get_config() config = {'mean': self.mean, 'stddev': self.stddev} return {**base_config, **config}
Random normal initializer. Draws samples from a normal distribution for given parameters. Examples: >>> # Standalone usage: >>> initializer = RandomNormal(mean=0.0, stddev=1.0) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = RandomNormal(mean=0.0, stddev=1.0) >>> layer = Dense(3, kernel_initializer=initializer) Args: mean: A python scalar or a scalar keras tensor. Mean of the random values to generate. stddev: A python scalar or a scalar keras tensor. Standard deviation of the random values to generate. seed: A Python integer or instance of `keras.backend.SeedGenerator`. Used to make the behavior of the initializer deterministic. Note that an initializer seeded with an integer or `None` (unseeded) will produce the same random values across multiple calls. To get different random values across multiple calls, use as seed an instance of `keras.backend.SeedGenerator`.
github-repos
def __init__(self, data_type, unit=None, analysis_period=None, metadata=None): assert hasattr(data_type, 'isDataType'), \ 'data_type must be a Ladybug DataType. Got {}'.format(type(data_type)) if unit is None: unit = data_type.units[0] else: data_type.is_unit_acceptable(unit) if analysis_period is not None: assert hasattr(analysis_period, 'isAnalysisPeriod'), \ 'analysis_period must be a Ladybug AnalysisPeriod. Got {}'.format( type(analysis_period)) if metadata is not None: assert isinstance(metadata, dict), \ 'metadata must be a dictionary. Got {}'.format(type(metadata)) self._data_type = data_type self._unit = unit self._analysis_period = analysis_period self._metadata = metadata or {}
Initiate Ladybug header for lists. Args: data_type: A DataType object. (e.g. Temperature) unit: data_type unit (Default: None) analysis_period: A Ladybug analysis period (Defualt: None) metadata: Optional dictionary of additional metadata, containing information such as 'source', 'city', or 'zone'.
juraj-google-style
def JoinPath(self, path_segments): first_path_segment = None if path_segments and platform.system() == 'Windows': first_path_segment = path_segments[0] first_path_segment_length = len(first_path_segment) first_path_segment_prefix = None if (first_path_segment_length >= 7 and first_path_segment.startswith('\\\\.\\') and first_path_segment[5:7] == ':\\'): first_path_segment_prefix = first_path_segment[4:6] first_path_segment = first_path_segment[7:] elif (first_path_segment_length >= 4 and first_path_segment[:4] in ['\\\\.\\', '\\\\?\\']): first_path_segment_prefix = first_path_segment[:4] first_path_segment = first_path_segment[4:] elif first_path_segment_length >= 2 and first_path_segment[1] == ':': first_path_segment_prefix = first_path_segment[:2] first_path_segment = first_path_segment[2:] elif first_path_segment.startswith('\\\\'): prefix, _, remainder = first_path_segment[2:].partition( self.PATH_SEPARATOR) first_path_segment_prefix = '\\\\{0:s}'.format(prefix) first_path_segment = '\\{0:s}'.format(remainder) if first_path_segment_prefix: first_path_segment, _, remainder = first_path_segment.partition( self.PATH_SEPARATOR) if not remainder: _ = path_segments.pop(0) else: path_segments[0] = remainder first_path_segment = ''.join([ first_path_segment_prefix, first_path_segment]) else: first_path_segment = None path_segments = [ segment.split(self.PATH_SEPARATOR) for segment in path_segments] path_segments = [ element for sublist in path_segments for element in sublist] path_segments = list(filter(None, path_segments)) if first_path_segment is None: path = '{0:s}{1:s}'.format( self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) else: path = first_path_segment if path_segments: path = '{0:s}{1:s}{2:s}'.format( path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) return path
Joins the path segments into a path. Args: path_segments (list[str]): path segments. Returns: str: joined path segments prefixed with the path separator.
juraj-google-style
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): local_stream = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write(local_stream, kmip_version=kmip_version) self.length = local_stream.length() super(ArchiveResponsePayload, self).write(output_stream, kmip_version=kmip_version) output_stream.write(local_stream.buffer)
Write the data encoding the Archive response payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
codesearchnet
def from_config(cls, config): config.pop('dtype', None) return cls(**config)
Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary. It will typically be the output of `get_config`. Returns: An Initializer instance.
github-repos
def UploadFilePath(self, filepath, offset=0, amount=None): return self._UploadChunkStream( self._streamer.StreamFilePath(filepath, offset=offset, amount=amount))
Uploads chunks of a file on a given path to the transfer store flow. Args: filepath: A path to the file to upload. offset: An integer offset at which the file upload should start on. amount: An upper bound on number of bytes to stream. If it is `None` then the whole file is uploaded. Returns: A `BlobImageDescriptor` object.
juraj-google-style
def delete_folder(self, folder): if (not is_valid_uuid(folder)): raise StorageArgumentException('Invalid UUID for folder: {0}'.format(folder)) self._authenticated_request.to_endpoint('folder/{}/'.format(folder)).delete()
Delete a folder. It will recursively delete all the content. Args: folder (str): The UUID of the folder to be deleted. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: 403 StorageNotFoundException: 404 HTTPError: other non-20x error codes
codesearchnet
def Deserialize(self, reader: BinaryReader): self.Type = StateType(reader.ReadByte()) self.Key = reader.ReadVarBytes(max=100) self.Field = reader.ReadVarString(max=32).decode('utf-8') self.Value = reader.ReadVarBytes(max=65535) if self.Type == StateType.Account: self.CheckAccountState() elif self.Type == StateType.Validator: self.CheckValidatorState()
Deserialize full object. Args: reader (neocore.IO.BinaryReader):
juraj-google-style
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's *utils.make_positions*. Args: input_ids (`torch.LongTensor`): Indices of input sequence tokens in the vocabulary. Returns: torch.Tensor
github-repos
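A worked example for create_position_ids_from_input_ids above; padding_idx=1 as in RoBERTa-style models, and the input ids are made up.

    import torch

    input_ids = torch.tensor([[5, 6, 7, 1, 1]])    # 1 is the padding token here
    positions = create_position_ids_from_input_ids(input_ids, padding_idx=1)
    print(positions)                               # tensor([[2, 3, 4, 1, 1]])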
async def fetch(self, method, url, params=None, headers=None, data=None): logger.debug('Sending request %s %s:\n%r', method, url, data) for retry_num in range(MAX_RETRIES): try: async with self.fetch_raw(method, url, params=params, headers=headers, data=data) as res: async with async_timeout.timeout(REQUEST_TIMEOUT): body = (await res.read()) logger.debug('Received response %d %s:\n%r', res.status, res.reason, body) except asyncio.TimeoutError: error_msg = 'Request timed out' except aiohttp.ServerDisconnectedError as err: error_msg = 'Server disconnected error: {}'.format(err) except (aiohttp.ClientError, ValueError) as err: error_msg = 'Request connection error: {}'.format(err) else: break logger.info('Request attempt %d failed: %s', retry_num, error_msg) else: logger.info('Request failed after %d attempts', MAX_RETRIES) raise exceptions.NetworkError(error_msg) if (res.status != 200): logger.info('Request returned unexpected status: %d %s', res.status, res.reason) raise exceptions.NetworkError('Request return unexpected status: {}: {}'.format(res.status, res.reason)) return FetchResponse(res.status, body)
Make an HTTP request. Automatically uses configured HTTP proxy, and adds Google authorization header and cookies. Failures will be retried MAX_RETRIES times before raising NetworkError. Args: method (str): Request method. url (str): Request URL. params (dict): (optional) Request query string parameters. headers (dict): (optional) Request headers. data: (str): (optional) Request body data. Returns: FetchResponse: Response data. Raises: NetworkError: If the request fails.
codesearchnet
def can_create(self): if (self.data.get('key_name') and self.data.get('value_name') and self.data.get('value_type')): return True return False
If the key_name, value_name, and value_type have been provided, the Registry Key can be created; otherwise it cannot. Returns: bool: True if the Registry Key can be created, False otherwise.
codesearchnet
def strace(device, trace_address, breakpoint_address): jlink = pylink.JLink() jlink.open() jlink.power_on() jlink.set_tif(pylink.JLinkInterfaces.SWD) jlink.connect(device) jlink.reset() jlink.breakpoint_clear_all() op = pylink.JLinkStraceOperation.TRACE_START jlink.strace_clear_all() jlink.strace_start() bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True) trhandle = jlink.strace_code_fetch_event(op, address=trace_address) jlink.restart() time.sleep(1) while True: if jlink.halted(): break while True: instructions = jlink.strace_read(1) if (len(instructions) == 0): break instruction = instructions[0] print(jlink.disassemble_instruction(instruction)) jlink.power_off() jlink.close()
Implements simple trace using the STrace API. Args: device (str): the device to connect to trace_address (int): address to begin tracing from breakpoint_address (int): address to breakpoint at Returns: ``None``
codesearchnet
def resolve_class(classref): if (classref is None): return None elif isinstance(classref, six.class_types): return classref elif isinstance(classref, six.string_types): return import_class(classref) else: raise ValueError(("Unable to resolve class for '%s'" % classref))
Attempt to return a Python class for the input class reference. If `classref` is a class or None, return it. If `classref` is a python classpath (e.g., "foo.bar.MyClass") import the class and return it. Args: classref: A fully-qualified Python path to class, or a Python class. Returns: A class.
codesearchnet
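A usage sketch for resolve_class above, assuming import_class accepts a standard dotted path; a dotted path and an already-imported class resolve to the same object.

    from collections import OrderedDict

    assert resolve_class('collections.OrderedDict') is OrderedDict
    assert resolve_class(OrderedDict) is OrderedDict
    assert resolve_class(None) is None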
def load_test_config_file(test_config_path, tb_filters=None): configs = _load_config_file(test_config_path) if tb_filters: tbs = [] for tb in configs[keys.Config.key_testbed.value]: if tb[keys.Config.key_testbed_name.value] in tb_filters: tbs.append(tb) if len(tbs) != len(tb_filters): raise MoblyConfigError('Expect to find %d test bed configs, found %d. Check if you have the correct test bed names.' % (len(tb_filters), len(tbs))) configs[keys.Config.key_testbed.value] = tbs mobly_params = configs.get(keys.Config.key_mobly_params.value, {}) log_path = mobly_params.get(keys.Config.key_log_path.value, _DEFAULT_LOG_PATH) if ENV_MOBLY_LOGPATH in os.environ: log_path = os.environ[ENV_MOBLY_LOGPATH] log_path = utils.abs_path(log_path) _validate_test_config(configs) _validate_testbed_configs(configs[keys.Config.key_testbed.value]) test_configs = [] for original_bed_config in configs[keys.Config.key_testbed.value]: test_run_config = TestRunConfig() test_run_config.testbed_name = original_bed_config[keys.Config.key_testbed_name.value] test_run_config.test_bed_name = test_run_config.testbed_name test_run_config.log_path = log_path test_run_config.controller_configs = original_bed_config.get(keys.Config.key_testbed_controllers.value, {}) test_run_config.user_params = original_bed_config.get(keys.Config.key_testbed_test_params.value, {}) test_configs.append(test_run_config) return test_configs
Processes the test configuration file provided by user. Loads the configuration file into a dict, unpacks each testbed config into its own dict, and validates the configuration in the process. Args: test_config_path: Path to the test configuration file. tb_filters: A subset of test bed names to be pulled from the config file. If None, then all test beds will be selected. Returns: A list of test configuration dicts to be passed to test_runner.TestRunner.
github-repos
def add_figure(self, key, url, **kwargs): figure = self._check_metadata_for_file(key=key, url=url, **kwargs) for dict_key in ( 'caption', 'label', 'material', 'filename', 'url', 'original_url', ): if kwargs.get(dict_key) is not None: figure[dict_key] = kwargs[dict_key] if key_already_there(figure, self.record.get('figures', ())): raise ValueError( 'There\'s already a figure with the key %s.' % figure['key'] ) self._append_to('figures', figure) self.add_document
Add a figure. Args: key (string): document key url (string): document url Keyword Args: caption (string): simple description label (string): material (string): original_url (string): original url filename (string): current url Returns: None
juraj-google-style
def get_permissions(self): user_role = (self.last_login_role() if self.last_login_role_key else self.role_set[0].role) return user_role.get_permissions()
Permissions of the user. Returns: List of Permission objects.
codesearchnet
def _prepare_init_params_from_job_description(cls, job_details): init_params = dict() init_params['model_name'] = job_details['ModelName'] init_params['instance_count'] = job_details['TransformResources']['InstanceCount'] init_params['instance_type'] = job_details['TransformResources']['InstanceType'] init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId') init_params['strategy'] = job_details.get('BatchStrategy') init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith') init_params['output_path'] = job_details['TransformOutput']['S3OutputPath'] init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId') init_params['accept'] = job_details['TransformOutput'].get('Accept') init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms') init_params['max_payload'] = job_details.get('MaxPayloadInMB') init_params['base_transform_job_name'] = job_details['TransformJobName'] return init_params
Convert the transform job description to init params that can be handled by the class constructor Args: job_details (dict): the returned job details from a describe_transform_job API call. Returns: dict: The transformed init_params
juraj-google-style
def run(main, argv=None, flags_parser=parse_flags_with_usage): try: args = _run_init((sys.argv if (argv is None) else argv), flags_parser) while _init_callbacks: callback = _init_callbacks.popleft() callback() try: _run_main(main, args) except UsageError as error: usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode) except: if FLAGS.pdb_post_mortem: traceback.print_exc() pdb.post_mortem() raise except Exception as e: _call_exception_handlers(e) raise
Begins executing the program.

Args:
    main: The main function to execute. It takes a single argument "argv",
        which is a list of command line arguments with parsed flags removed.
        If it returns an integer, it is used as the process's exit code.
    argv: A non-empty list of the command line arguments including program name,
        sys.argv is used if None.
    flags_parser: Callable[[List[Text]], Any], the function used to parse flags.
        The return value of this function is passed to `main` untouched.
        It must guarantee FLAGS is parsed after this function is called.
- Parses command line flags with the flag module.
- If there are any errors, prints usage().
- Calls main() with the remaining arguments.
- If main() raises a UsageError, prints usage and the error message.
codesearchnet
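A typical absl-style entry point that exercises run(); the flag name and greeting are illustrative.

from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('name', 'world', 'Who to greet.')


def main(argv):
    # argv contains the program name plus any arguments left after flag parsing.
    print('Hello, %s!' % FLAGS.name)


if __name__ == '__main__':
    app.run(main)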
def convert_variable_to_constant(self, incoming_edge, tensor_data): raise NotImplementedError
Converts a variable in this Convertible and its dependencies. This method should make sure that a converted copy of itself is present in the converted graph, and that all Convertibles depending on this one also go through the same process. Args: incoming_edge: The graph edge into this Convertible that is being converted to a constant. tensor_data: The tensor representing the constant.
github-repos
def _get_original_composition_ratio(self, reaction): if self.c1_original == self.c2_original: return 1 c1_coeff = reaction.get_coeff(self.c1_original) \ if self.c1_original in reaction.reactants else 0 c2_coeff = reaction.get_coeff(self.c2_original) \ if self.c2_original in reaction.reactants else 0 return c1_coeff * 1.0 / (c1_coeff + c2_coeff)
Returns the molar mixing ratio between the reactants with ORIGINAL
(instead of processed) compositions for a reaction.

Args:
    reaction (Reaction): Reaction object that contains the original
        reactant compositions.

Returns:
    The molar mixing ratio between the original reactant
    compositions for a reaction.
juraj-google-style
def getEstTraitCovar(self, term_i=None):
    assert self.P > 1, 'Trait covars not defined for single trait analysis'

    if term_i is None:
        RV = SP.zeros((self.P, self.P))
        for term_i in range(self.n_terms):
            RV += self.vd.getTerm(term_i).getTraitCovar().K()
    else:
        assert term_i < self.n_terms, 'Invalid term index'
        RV = self.vd.getTerm(term_i).getTraitCovar().K()
    return RV
Returns the estimated trait covariance matrix.

Args:
    term_i: index of the term of interest. If None, the covariance
        summed over all terms is returned.
juraj-google-style
def _gather_all_deps(self, args, kwargs): depends = [] count = 0 for dep in args: if isinstance(dep, Future): if (self.tasks[dep.tid]['status'] not in FINAL_STATES): count += 1 depends.extend([dep]) for key in kwargs: dep = kwargs[key] if isinstance(dep, Future): if (self.tasks[dep.tid]['status'] not in FINAL_STATES): count += 1 depends.extend([dep]) for dep in kwargs.get('inputs', []): if isinstance(dep, Future): if (self.tasks[dep.tid]['status'] not in FINAL_STATES): count += 1 depends.extend([dep]) return (count, depends)
Count the number of unresolved futures on which a task depends.

Args:
    - args (List[args]) : The list of positional args passed to the fn
    - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn

Returns:
    - count, [list of dependencies]
codesearchnet
def write_csv_from_dict(filename, input_dict):
    with open(PATH_TO_DIR + '/data/' + filename, 'w') as f:
        for k, v in input_dict.items():
            line = k
            for item in v:
                line += ',' + item
            f.write(line + '\n')
    print('Wrote to file %s' % filename)
    check_with_golden(filename)
Writes out a `.csv` file from an input dictionary.

After writing out the file, it checks the new list against the golden
to make sure the golden file is up-to-date.

Args:
    filename: String that is the output file name.
    input_dict: Dictionary that is to be written out to a `.csv` file.
github-repos
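A minimal call sketch; it assumes PATH_TO_DIR and check_with_golden are already defined in the surrounding module, and the file name and dictionary contents are made up.

# Each key becomes the first column; its list items become the remaining columns.
write_csv_from_dict('ops.csv', {
    'MatMul': ['float32', 'float64'],
    'Relu': ['float32'],
})
# Resulting rows: "MatMul,float32,float64" and "Relu,float32".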
def Approve(self, request, global_params=None): config = self.GetMethodConfig('Approve') return self._RunMethod(config, request, global_params=global_params)
Approves or rejects a pending build. If approved, the returned LRO will be analogous to the LRO returned from a CreateBuild call. If rejected, the returned LRO will be immediately done. Args: request: (CloudbuildProjectsLocationsBuildsApproveRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def forward(self, inputs, expert_size): input_list = inputs.split(expert_size, dim=0) output_list = [] for i in range(self.num_experts): output_list.append(F.linear(input_list[i], self.weight[i])) results = torch.cat(output_list, dim=0) return results
Forward pass of the JetMoeParallelExperts module. Args: inputs (Tensor): Input tensor. expert_size: Expert size information. Returns: Tensor: Output tensor.
github-repos
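A standalone sketch of the grouped linear computation in forward(), with made-up sizes; it reproduces the split / per-expert F.linear / concat pattern outside the module.

import torch
import torch.nn.functional as F

num_experts, in_features, out_features = 2, 4, 3
weight = torch.randn(num_experts, out_features, in_features)  # one weight matrix per expert
inputs = torch.randn(5, in_features)                          # 5 tokens already sorted by expert
expert_size = [2, 3]                                          # 2 tokens for expert 0, 3 for expert 1

chunks = inputs.split(expert_size, dim=0)
outputs = torch.cat(
    [F.linear(chunks[i], weight[i]) for i in range(num_experts)], dim=0)
print(outputs.shape)  # torch.Size([5, 3])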
def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'): cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain) return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))
Copy all Job Dictionaries from the ServiceManagement. Args: domain: The name of a constant in Foundation referencing the domain. Will copy all launchd services by default. Returns: A marshalled python list of dicts containing the job dictionaries.
juraj-google-style
def decode_list(self, ids): decoded_ids = [] for id_ in ids: if (0 <= id_ < self._num_reserved_ids): decoded_ids.append(RESERVED_TOKENS[int(id_)]) else: decoded_ids.append((id_ - self._num_reserved_ids)) return [str(d) for d in decoded_ids]
Transform a sequence of int ids into their string versions.

This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.

Args:
    ids: list of integers to be converted.

Returns:
    strs: list of human-readable strings.
codesearchnet
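A worked illustration of the id shift, assuming two reserved ids (a common default); the encoder class name is a stand-in for whichever class defines decode_list.

# With _num_reserved_ids == 2: ids 0 and 1 map to RESERVED_TOKENS,
# every other id is shifted down by 2 before being stringified.
encoder = SomeTextEncoder()  # hypothetical instance of the defining class
print(encoder.decode_list([0, 1, 5, 9]))
# -> [RESERVED_TOKENS[0], RESERVED_TOKENS[1], '3', '7']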
def has_course_mode(self, course_run_id, mode): course_modes = self.get_course_modes(course_run_id) return any(course_mode for course_mode in course_modes if course_mode['slug'] == mode)
Query the Enrollment API to see whether a course run has a given course mode available.

Arguments:
    course_run_id (str): The string value of the course run's unique identifier
    mode (str): The slug of the course mode to look for.

Returns:
    bool: Whether the course run has the given mode available for enrollment.
juraj-google-style
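A usage sketch; the client object, course run id and mode below are illustrative.

# Illustrative only.
if enrollment_client.has_course_mode('course-v1:edX+DemoX+Demo_Course', 'verified'):
    print('Verified enrollment is available for this run.')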
def __call__(self, old, new): if not new or not self.precondition(): return self.validate(old, new)
Validate the `new` translation against the `old` one. No checks are needed for deleted translations Args: old: The old translation. new: The new translation. Raises: A ValidationError with an appropriate message.
juraj-google-style
def merge_tree(dest: Any, src: Any, merge_fn: Optional[Callable[[KeyPath, Any, Any], Any]]=None, root_path: Optional[KeyPath]=None) -> Any: if not root_path: root_path = KeyPath() if isinstance(dest, dict) and isinstance(src, dict): return _merge_dict_into_dict(dest, src, merge_fn, root_path) if isinstance(dest, list) and isinstance(src, dict): return _merge_dict_into_list(dest, src, root_path) if merge_fn: return merge_fn(root_path, dest, src) return src
Deep merge two (maybe) hierarchical values.

Args:
    dest: Destination value.
    src: Source value. When source value is a dict, it's considered as a
        patch (delta) to the destination when destination is a dict or list.
        For other source types, it's considered as a new value that will
        replace dest completely.
    merge_fn: A function to handle value merge that will be called for updated
        or added keys. If a branch is added/updated, the root of branch will
        be passed to merge_fn. The signature of the function is:
        (path, left_value, right_value) -> final_value
        If a key is only present in the src dict, left_value is MISSING_VALUE.
        If a key is only present in the dest dict, right_value is MISSING_VALUE.
        Otherwise both left_value and right_value are filled.
        If the final value is MISSING_VALUE, it will be removed from its
        parent collection.
    root_path: KeyPath of dest.

Returns:
    Merged value.

Raises:
    KeyError: Dict keys are not integers when merging into a list.
github-repos
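A small dict-into-dict example of the merge semantics described above; the keys and values are arbitrary, and the result assumes the usual deep-merge behaviour of _merge_dict_into_dict.

dest = {'a': 1, 'b': {'c': 2}}
src = {'b': {'d': 3}, 'e': 4}
merged = merge_tree(dest, src)
# merged == {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4}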
def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None): url = (Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)) args = {} if start_index: args['startIndex'] = start_index if max_results: args['maxResults'] = max_results if (page_token is not None): args['pageToken'] = page_token return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
Retrieves the contents of a table. Args: table_name: the name of the table as a tuple of components. start_index: the index of the row at which to start retrieval. max_results: an optional maximum number of rows to retrieve. page_token: an optional token to continue the retrieval. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
codesearchnet
def _ParseLogLine(self, parser_mediator, structure, key): time_elements_tuple = self._GetTimeElementsTuple(structure) try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return self._last_month = time_elements_tuple[1] if key == 'logline': self._previous_structure = structure else: structure = self._previous_structure event_data = MacAppFirewallLogEventData() event_data.action = structure.action event_data.agent = structure.agent event_data.computer_name = structure.computer_name event_data.process_name = structure.process_name.strip() event_data.status = structure.status event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parse a single log line and produce an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): identifier of the structure of tokens. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
juraj-google-style
def custom_licenses(self): buf = (ctypes.c_char * self.MAX_BUF_SIZE)() result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE) if (result < 0): raise errors.JLinkException(result) return ctypes.string_at(buf).decode()
Returns a string of the installed licenses the J-Link has. Args: self (JLink): the ``JLink`` instance Returns: String of the contents of the custom licenses the J-Link has.
codesearchnet
def cast_to_floatx(x): if isinstance(x, (tensor_lib.Tensor, variables_module.Variable, sparse_tensor.SparseTensor)): return math_ops.cast(x, dtype=floatx()) return numpy_compat.np_asarray(x, dtype=floatx())
Cast a Numpy array to the default Keras float type. Args: x: Numpy array or TensorFlow tensor. Returns: The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor if `x` was a tensor), cast to its new type. Example: >>> tf.keras.backend.floatx() 'float32' >>> arr = np.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = cast_to_floatx(arr) >>> new_arr array([1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32')
github-repos
def _ResolveVars(value): if isinstance(value, dict): resolved_value = {} for k, v in value.items(): resolved_value[k] = MessageValue._ResolveVars(v) return resolved_value if isinstance(value, list): return [MessageValue._ResolveVars(v) for v in value] if isinstance(value, stl.base.QualifierValue.Resolved): return value.Generate() if isinstance(value, stl.base.LocalVar): if value.value is None: raise ValueError("LocalVar '%s' does not have a value." % value.name) return value.value if isinstance(value, stl.base.Func): return value.Run() if isinstance(value, MessageValue): return value._EncodeToString() return value
Resolve any variables or run any functions in |value|. Args: value: Value which may have variables or functions to resolve. Returns: Resolved value. Raises: ValueError: If a concrete value for |value| cannot be determined.
github-repos
def from_file(cls, fp, format_=None, fps=None, **kwargs): if (format_ is None): text = fp.read() fragment = text[:10000] format_ = autodetect_format(fragment) fp = io.StringIO(text) impl = get_format_class(format_) subs = cls() subs.format = format_ subs.fps = fps impl.from_file(subs, fp, format_, fps=fps, **kwargs) return subs
Read subtitle file from file object. See :meth:`SSAFile.load()` for full description. Note: This is a low-level method. Usually, one of :meth:`SSAFile.load()` or :meth:`SSAFile.from_string()` is preferable. Arguments: fp (file object): A file object, ie. :class:`io.TextIOBase` instance. Note that the file must be opened in text mode (as opposed to binary). Returns: SSAFile
codesearchnet
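A usage sketch for the classmethod above; the file path is hypothetical and the format is autodetected from the content.

import io

with io.open('subtitles.srt', encoding='utf-8') as fp:  # must be opened in text mode
    subs = SSAFile.from_file(fp)
print(subs.format)  # e.g. 'srt', as autodetected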
def inverseHistogram(hist, bin_range):
    data = (hist.astype(float) / np.min(hist[np.nonzero(hist)]))
    new_data = np.empty(shape=np.sum(data, dtype=int))
    i = 0
    xvals = np.linspace(bin_range[0], bin_range[1], len(data))
    for (d, x) in zip(data, xvals):
        d = int(d)
        new_data[i:(i + d)] = x
        i += d
    return new_data
Sample data from a given histogram and the (min, max) values of its bin range.

Returns:
    np.array: data whose histogram has the same shape as the given one
codesearchnet
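A round-trip sketch: build a histogram with numpy, then sample data whose histogram has the same shape. The sample size and bin count are arbitrary.

import numpy as np

data = np.random.normal(loc=0.0, scale=1.0, size=1000)
hist, edges = np.histogram(data, bins=20)
reconstructed = inverseHistogram(hist, (edges[0], edges[-1]))
# np.histogram(reconstructed, bins=20) has (up to scaling) the same shape as `hist`.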
def get_program_type_by_slug(self, slug): return self._load_data(self.PROGRAM_TYPES_ENDPOINT, resource_id=slug, default=None)
Get a program type by its slug. Arguments: slug (str): The slug to identify the program type. Returns: dict: A program type object.
codesearchnet
def ParseReceiverData( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) data = {} key_url = self._GetRowValue(query_hash, row, 'request_key') data_dict = {} description = 'MacKeeper Entry' if key_url.endswith('plist'): description = 'Configuration Definition' data['text'] = 'Plist content added to cache.' elif key_url.startswith('http: description = 'MacKeeper Event' try: _, _, part = key_url.partition('?') data['text'] = part.replace('&', ' ') except UnicodeDecodeError: data['text'] = 'N/A' elif key_url.startswith('http: description = 'Account Activity' _, _, activity = key_url.partition(' if activity: data['text'] = 'Action started: {0:s}'.format(activity) else: data['text'] = 'Unknown activity.' elif key_url.startswith('http: description = 'Chat ' try: jquery = self._GetRowValue(query_hash, row, 'data') jquery = codecs.decode(jquery, 'utf-8') except UnicodeDecodeError: jquery = '' data_dict = self._ExtractJQuery(jquery) data = self._ParseChatData(data_dict) data['entry_type'] = data_dict.get('type', '') if data['entry_type'] == 'comment': description += 'Comment' elif data['entry_type'] == 'outgoing': description += 'Outgoing Message' elif data['entry_type'] == 'incoming': description += 'Incoming Message' else: description += 'Entry' data['text'] = ';'.join(self._DictToListOfStrings(data_dict)) if not data['text']: data['text'] = 'No additional data.' event_data = MacKeeperCacheEventData() event_data.description = description event_data.event_type = data.get('event_type', None) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.record_id = data.get('id', None) event_data.room = data.get('room', None) event_data.text = data.get('text', None) event_data.url = key_url event_data.user_name = data.get('user', None) event_data.user_sid = data.get('sid', None) time_value = self._GetRowValue(query_hash, row, 'time_string') if isinstance(time_value, py2to3.INTEGER_TYPES): date_time = dfdatetime_java_time.JavaTime(timestamp=time_value) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) else: try: timestamp = timelib.Timestamp.FromTimeString(time_value) except errors.TimestampError: parser_mediator.ProduceExtractionWarning( 'Unable to parse time string: {0:s}'.format(time_value)) return event = time_events.TimestampEvent( timestamp, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a single row from the receiver and cache response table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def _build_recursive_hd_gather(input_tensors, devices, red_op): num_devices = len(devices) num_hops = int(math.log(num_devices, 2)) if num_devices != 2 ** num_hops: raise ValueError('num_devices must be a power of 2') chunks = input_tensors for h in range(0, num_hops): span = 2 ** h group_size = span * 2 new_chunks = [[] for _ in devices] for d in range(0, num_devices): if d % group_size >= group_size / 2: continue left_dev = devices[d] right_dev = devices[d + span] left_split = array_ops.split(chunks[d], 2) right_split = array_ops.split(chunks[d + span], 2) with ops.device(left_dev): new_chunks[d] = red_op(left_split[0], right_split[0]) with ops.device(right_dev): new_chunks[d + span] = red_op(left_split[1], right_split[1]) chunks = new_chunks return chunks
Construct the gather phase of recursive halving-doubling all-reduce. Args: input_tensors: list of `tf.Tensor` to be elementwise reduced. devices: a list of strings naming the devices hosting input_tensors, which will also be used to host the (partial) reduction values. red_op: a binary elementwise reduction Op. Returns: list of `tf.Tensor` which are the fully reduced tensor shards. Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times.
github-repos
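A device-free numpy sketch of the gather phase implemented above, using 4 pseudo-devices and addition as the reduction op; it only illustrates the halving-doubling index pattern, not TF device placement.

import numpy as np

def hd_gather(tensors):
    # After the loop, shard d holds the fully reduced values for its 1/len(tensors)
    # slice of the original tensor.
    num = len(tensors)
    hops = int(np.log2(num))
    chunks = list(tensors)
    for h in range(hops):
        span = 2 ** h
        group = span * 2
        new_chunks = [None] * num
        for d in range(num):
            if d % group >= group // 2:
                continue
            left = np.array_split(chunks[d], 2)
            right = np.array_split(chunks[d + span], 2)
            new_chunks[d] = left[0] + right[0]
            new_chunks[d + span] = left[1] + right[1]
        chunks = new_chunks
    return chunks

shards = hd_gather([np.ones(8) * i for i in range(4)])
# Each shard has 2 elements, all equal to 0 + 1 + 2 + 3 = 6.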
def sheets_tab_create(config, auth, sheet_url_or_name, sheet_tab): sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab) if tab_id is None: sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'addSheet': {'properties': {'title': sheet_tab}}}]})
Create a tab in a sheet.

Args:
  config - see starthinker/util/configuration.py
  auth - user or service
  sheet_url_or_name - one of: URL, document title, or id
  sheet_tab - name of tab to create

No Return
github-repos
def compress_file(filepath, compression='gz'): if (compression not in ['gz', 'bz2']): raise ValueError("Supported compression formats are 'gz' and 'bz2'.") from monty.io import zopen if (not filepath.lower().endswith(('.%s' % compression))): with open(filepath, 'rb') as f_in, zopen(('%s.%s' % (filepath, compression)), 'wb') as f_out: f_out.writelines(f_in) os.remove(filepath)
Compresses a file with the correct extension. Functions like standard Unix command line gzip and bzip2 in the sense that the original uncompressed files are not retained. Args: filepath (str): Path to file. compression (str): A compression mode. Valid options are "gz" or "bz2". Defaults to "gz".
codesearchnet
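A usage sketch; the path is made up. After the call only the compressed file remains, mirroring command-line gzip.

compress_file('results.json', compression='gz')
# 'results.json' is removed and 'results.json.gz' is left in its place.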
def sg_summary_image(tensor, prefix=None, name=None):
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    if not tf.get_variable_scope().reuse:
        tf.summary.image(name + '-im', tensor)
r"""Register `tensor` to summary report as `image` Args: tensor: A tensor to log as image prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
juraj-google-style