Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def GetFileEntryByPathSpec(self, path_spec): location = getattr(path_spec, 'location', None) if (location is None or not location.startswith(self.LOCATION_ROOT)): return None if len(location) == 1: return cpio_file_entry.CPIOFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) cpio_archive_file_entry = self._cpio_archive_file.GetFileEntryByPath( location[1:]) if cpio_archive_file_entry is None: return None return cpio_file_entry.CPIOFileEntry( self._resolver_context, self, path_spec, cpio_archive_file_entry=cpio_archive_file_entry)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: CPIOFileEntry: a file entry or None if not available.
juraj-google-style
def _set_mode(self, discover_mode, connect_mode): payload = struct.pack("<BB", discover_mode, connect_mode) response = self._send_command(6, 1, payload) result, = unpack("<H", response.payload) if result != 0: return False, {'reason': 'Error code from BLED112 setting mode', 'code': result} return True, None
Set the mode of the BLED112, used to enable and disable advertising. To enable advertising, use 4, 2. To disable advertising use 0, 0. Args: discover_mode (int): The discoverability mode, 0 for off, 4 for on (user data) connect_mode (int): The connectability mode, 0 for off, 2 for undirected connectable
juraj-google-style
def __init__(self, structure, element): self.structure = structure self.element = element interstitial_finder = StructureMotifInterstitial(self.structure, self.element) self.unique_defect_seq = [] pdc = PointDefectComparator() for poss_site in interstitial_finder.enumerate_defectsites(): now_defect = Interstitial( self.structure, poss_site) append_defect = True for unique_defect in self.unique_defect_seq: if pdc.are_equal( now_defect, unique_defect): append_defect = False if append_defect: self.unique_defect_seq.append( now_defect) self.count_def = 0
Initializes an Interstitial generator using structure motifs Args: structure (Structure): pymatgen structure object element (str or Element or Specie): element for the interstitial
juraj-google-style
def _namespace_to_ord(namespace): n = 0 for (i, c) in enumerate(namespace): n += ((_LEX_DISTANCE[((MAX_NAMESPACE_LENGTH - i) - 1)] * NAMESPACE_CHARACTERS.index(c)) + 1) return n
Converts a namespace string into an int representing its lexicographic order. >>> _namespace_to_ord('') 0 >>> _namespace_to_ord('_') 1 >>> _namespace_to_ord('__') 2 Args: namespace: A namespace string. Returns: An int representing the lexicographic order of the given namespace string.
codesearchnet
def smart_case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'): return control_flow_case._case_helper(smart_cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)
Like tf.case, except attempts to statically evaluate predicates. If any predicate in `pred_fn_pairs` is a bool or has a constant value, the associated callable will be called or omitted depending on its value. Otherwise this functions like tf.case. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable.
github-repos
def cursor_event(self, x, y, dx, dy): self.sys_camera.rot_state(x, y)
The standard mouse movement event method. Can be overridden to add new functionality. By default this feeds the system camera with new values. Args: x: The current mouse x position y: The current mouse y position dx: Delta x position (x position difference from the previous event) dy: Delta y position (y position difference from the previous event)
juraj-google-style
def get_sid(principal): if principal is None: principal = 'NULL SID' try: sid = salt.utils.win_functions.get_sid_from_name(principal) except CommandExecutionError: sid = principal try: sid = win32security.ConvertStringSidToSid(sid) except pywintypes.error: log.exception('Invalid user/group or sid: %s', principal) raise CommandExecutionError( 'Invalid user/group or sid: {0}'.format(principal)) except TypeError: raise CommandExecutionError return sid
Converts a username to a sid, or verifies a sid. Required for working with the DACL. Args: principal(str): The principal to lookup the sid. Can be a sid or a username. Returns: PySID Object: A sid Usage: .. code-block:: python # Get a user's sid salt.utils.win_dacl.get_sid('jsnuffy') # Verify that the sid is valid salt.utils.win_dacl.get_sid('S-1-5-32-544')
juraj-google-style
def list_physical_devices(self, device_type=None): self._initialize_physical_devices() if device_type is None: return list(self._physical_devices) return [d for d in self._physical_devices if d.device_type == device_type]
List local devices visible to the system. This API allows a client to query the devices before they have been initialized by the eager runtime. Additionally a user can filter by device type, to get only CPUs or GPUs. Args: device_type: Optional device type to limit results to Returns: List of PhysicalDevice objects.
github-repos
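As a point of reference, the same query is exposed through TensorFlow's public API; a minimal usage sketch (assuming TensorFlow is installed, and that this context method is what backs `tf.config.list_physical_devices`):

```python
import tensorflow as tf

# List every physical device the runtime can see, then narrow to GPUs only.
all_devices = tf.config.list_physical_devices()
gpus = tf.config.list_physical_devices("GPU")
print(len(all_devices), len(gpus))
```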
def get_year_description(self): def format_year(s): regex = re.compile('^\\d+$') if regex.match(s): year_int = int(s) if (year_int < 1900): return year_int return datetime.date(year_int, 1, 1).strftime('%Y') else: return s return self.get_segment_description(self._expression_parts[6], '', (lambda s: format_year(s)), (lambda s: _(', every {0} years').format(s)), (lambda s: _(', {0} through {1}')), (lambda s: _(', only in {0}')))
Generates a description for only the YEAR portion of the expression Returns: The YEAR description
codesearchnet
def read(path, encoding="utf-8"): try: with io.open(path, encoding=encoding) as f: return f.read() except Exception as e: logger.error("read: %s failed. Error: %s", path, e) return ""
Read the content of the file. Args: path (str): Path to the file encoding (str): File encoding. Default: utf-8 Returns: str: File content or empty string if there was an error
juraj-google-style
def _audio_response_for_run(self, tensor_events, run, tag, sample): response = [] index = 0 filtered_events = self._filter_by_sample(tensor_events, sample) content_type = self._get_mime_type(run, tag) for (index, tensor_event) in enumerate(filtered_events): data = tensor_util.make_ndarray(tensor_event.tensor_proto) label = data[(sample, 1)] response.append({'wall_time': tensor_event.wall_time, 'step': tensor_event.step, 'label': plugin_util.markdown_to_safe_html(label), 'contentType': content_type, 'query': self._query_for_individual_audio(run, tag, sample, index)}) return response
Builds a JSON-serializable object with information about audio. Args: tensor_events: A list of audio event_accumulator.TensorEvent objects. run: The name of the run. tag: The name of the tag the audio entries all belong to. sample: The zero-indexed sample of the audio entries for which to retrieve information. For instance, setting `sample` to `2` will fetch information about only the third audio clip of each batch, and steps with fewer than three audio clips will be omitted from the results. Returns: A list of dictionaries containing the wall time, step, label, content type, and query string for each audio entry.
codesearchnet
def _cmd(self, command, uid=None): if not uid: uid = self.uid self._client_send(json.dumps({'cmd': command, 'uid': uid})) return self._client_receive()
Send a command to the server. Args: command: str, The name of the command to execute. uid: int, the uid of the session to send the command to. Returns: The line that was written back.
juraj-google-style
def compute_values(edge_compatibility, v): all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v) output = tf.reduce_sum(all_edge_values, axis=1) return output
Compute values. If edge compatibilities is just adjacency, we get ggnn. Args: edge_compatibility: A tensor of shape [batch, num_transforms, length, depth] v: A tensor of shape [batch, num_transforms, length, depth] Returns: output: A [batch, length, depth] tensor
codesearchnet
def configs_for_writer(writer=None, ppp_config_dir=None): search_paths = (ppp_config_dir,) if ppp_config_dir else tuple() if writer is not None: if not isinstance(writer, (list, tuple)): writer = [writer] config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer] else: writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths) config_files = set(writer_configs) for config_file in config_files: config_basename = os.path.basename(config_file) writer_configs = config_search_paths( os.path.join("writers", config_basename), *search_paths) if not writer_configs: LOG.warning("No writer configs found for '%s'", writer) continue yield writer_configs
Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files
juraj-google-style
def _override_helper(clazz_object, operator, func): if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError(f'Overriding {operator} is disallowed. Allowed operators are {Tensor.OVERLOADABLE_OPERATORS}.') setattr(clazz_object, operator, func)
Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator is not allowed to be overwritten.
github-repos
def configs_for_reader(reader=None, ppp_config_dir=None): search_paths = (ppp_config_dir,) if ppp_config_dir else tuple() if reader is not None: if not isinstance(reader, (list, tuple)): reader = [reader] new_readers = [] for reader_name in reader: if reader_name.endswith('.yaml') or reader_name not in OLD_READER_NAMES: new_readers.append(reader_name) continue new_name = OLD_READER_NAMES[reader_name] raise ValueError("Reader name '{}' has been deprecated, use '{}' instead.".format(reader_name, new_name)) reader = new_readers config_files = [r if r.endswith('.yaml') else r + '.yaml' for r in reader] else: reader_configs = glob_config(os.path.join('readers', '*.yaml'), *search_paths) config_files = set(reader_configs) for config_file in config_files: config_basename = os.path.basename(config_file) reader_configs = config_search_paths( os.path.join("readers", config_basename), *search_paths) if not reader_configs: raise ValueError("No reader(s) named: {}".format(reader)) yield reader_configs
Generator of reader configuration files for one or more readers Args: reader (Optional[str]): Yield configs only for this reader ppp_config_dir (Optional[str]): Additional configuration directory to search for reader configuration files. Returns: Generator of lists of configuration files
juraj-google-style
def AnalyzeFileObject(self, file_object): tsk_image_object = tsk_image.TSKFileSystemImage(file_object) try: pytsk3.Volume_Info(tsk_image_object) except IOError: return None return self.type_indicator
Retrieves the format specification. Args: file_object (FileIO): file-like object. Returns: str: type indicator if the file-like object contains a supported format or None otherwise.
juraj-google-style
def restrict_bond_dict(self, bond_dict): return {j: bond_dict[j] & set(self.index) for j in self.index}
Restrict a bond dictionary to self. Args: bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`, to see examples for a bond_dict. Returns: bond dictionary
juraj-google-style
def abs_path(rel_path): return os.path.abspath(os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path))
Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
codesearchnet
def get_error_name(error): error_type = type(error) if error_type.__module__ in ['__main__', 'builtins']: return error_type.__name__ else: return f'{error_type.__module__}.{error_type.__name__}'
Return canonical error name as string. For builtin errors like ValueError or Exception, will return the bare name, like ValueError or Exception. For all other exceptions, will return modulename.errorname, such as arbpackage.mod.myerror Args: error: Exception object. Returns: str. Canonical error name.
juraj-google-style
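A quick check of both branches, assuming `get_error_name` above is in scope; the fake `__module__` assignment only stands in for a class defined in an installed package:

```python
class MyError(Exception):
    pass

MyError.__module__ = "mypkg.errors"  # pretend the class comes from a package

print(get_error_name(ValueError("boom")))  # -> "ValueError" (builtin branch)
print(get_error_name(MyError("boom")))     # -> "mypkg.errors.MyError"
```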
def ReadFile(filename, logger=None): try: encoding = file_resources.FileEncoding(filename) with codecs.open(filename, mode='r', encoding=encoding) as fd: lines = fd.readlines() line_ending = file_resources.LineEnding(lines) source = '\n'.join((line.rstrip('\r\n') for line in lines)) + '\n' return (source, line_ending, encoding) except IOError as e: if logger: logger(e) e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) raise except UnicodeDecodeError as e: if logger: logger('Could not parse %s! Consider excluding this file with --exclude.', filename) logger(e) e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) raise
Read the contents of the file. An optional logger can be specified to emit messages to your favorite logging stream. If specified, then no exception is raised. This is external so that it can be used by third-party applications. Arguments: filename: (unicode) The name of the file. logger: (function) A function or lambda that takes a string and emits it. Returns: The contents of filename. Raises: IOError: raised if there was an error reading the file.
github-repos
def gen_schedule(user, num_blocks=6, surrounding_blocks=None): no_signup_today = None schedule = [] if (surrounding_blocks is None): surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks) if (len(surrounding_blocks) == 0): return (None, False) signups = EighthSignup.objects.filter(user=user, scheduled_activity__block__in=surrounding_blocks).select_related('scheduled_activity', 'scheduled_activity__block', 'scheduled_activity__activity') block_signup_map = {s.scheduled_activity.block.id: s.scheduled_activity for s in signups} for b in surrounding_blocks: current_sched_act = block_signup_map.get(b.id, None) if current_sched_act: current_signup = current_sched_act.title_with_flags current_signup_cancelled = current_sched_act.cancelled current_signup_sticky = current_sched_act.activity.sticky rooms = current_sched_act.get_scheduled_rooms() else: current_signup = None current_signup_cancelled = False current_signup_sticky = False rooms = None flags = ('locked' if b.locked else 'open') blk_today = b.is_today() if (blk_today and (not current_signup)): flags += ' warning' if current_signup_cancelled: flags += ' cancelled warning' if current_signup_cancelled: current_signup = current_signup.replace(' (Cancelled)', '') info = {'id': b.id, 'block': b, 'block_letter': b.block_letter, 'current_signup': current_signup, 'current_signup_cancelled': current_signup_cancelled, 'current_signup_sticky': current_signup_sticky, 'locked': b.locked, 'date': b.date, 'flags': flags, 'is_today': blk_today, 'signup_time': b.signup_time, 'signup_time_future': b.signup_time_future, 'rooms': rooms} schedule.append(info) if (blk_today and (not current_signup)): no_signup_today = True return (schedule, no_signup_today)
Generate a list of information about a block and a student's current activity signup. Returns: A tuple of (schedule, no_signup_today)
codesearchnet
def __init__(self, context): self._db_connection_provider = context.db_connection_provider self._multiplexer = context.multiplexer
Instantiates HistogramsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def dbr(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `dbr`'.format(value)) self._dbr = value
Corresponds to IDD Field `dbr` Daily temperature range for hottest month. [defined as mean of the difference between daily maximum and daily minimum dry-bulb temperatures for hottest month] Args: value (float): value for IDD Field `dbr` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def __init__(self, context, name, task_id): self.name = name self.context = context self.task_id = task_id
Initialize ChainOfTrust. Args: context (scriptworker.context.Context): the scriptworker context name (str): the name of the task (e.g., signing) task_id (str): the task_id of the task
juraj-google-style
def GetStringTypeSummary(obj, available_space, line_length): if len(obj) + len(TWO_DOUBLE_QUOTES) <= available_space: content = obj else: additional_len_needed = len(TWO_DOUBLE_QUOTES) + len(formatting.ELLIPSIS) if available_space < additional_len_needed: available_space = line_length content = formatting.EllipsisTruncate(obj, available_space - len(TWO_DOUBLE_QUOTES), line_length) return formatting.DoubleQuote(content)
Returns a custom summary for string type objects. This function constructs a summary for string type objects by double quoting the string value. The double quoted string value will be potentially truncated with ellipsis depending on whether it has enough space available to show the full string value. Args: obj: The object to generate summary for. available_space: Number of character spaces available. line_length: The full width of the terminal, default is 80. Returns: A summary for the input object.
github-repos
def CredibleInterval(self, percentage=90): prob = ((1 - (percentage / 100.0)) / 2) interval = (self.Value(prob), self.Value((1 - prob))) return interval
Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high
codesearchnet
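The arithmetic is symmetric tail trimming: for percentage=90, prob is 0.05 and the interval is (Value(0.05), Value(0.95)). A standalone sketch using a percentile-based Value over raw samples, not the distribution object above:

```python
import numpy as np

def credible_interval(samples, percentage=90):
    # Trim (100 - percentage) / 2 percent from each tail.
    tail = (100 - percentage) / 2
    return (np.percentile(samples, tail), np.percentile(samples, 100 - tail))

samples = np.random.default_rng(0).normal(size=10_000)
print(credible_interval(samples, 90))  # roughly (-1.64, 1.64) for a standard normal
```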
def add_triple(self, p, o, auto_refresh=True): self.rdf.graph.add((self.uri, p, self._handle_object(o))) self._handle_triple_refresh(auto_refresh)
add triple by providing p,o, assumes s = subject Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: adds triple to self.rdf.graph
codesearchnet
def download_files_maybe_extract(urls, directory, check_files=[]): check_files = [os.path.join(directory, f) for f in check_files] if _check_download(*check_files): return for url in urls: download_file_maybe_extract(url=url, directory=directory) if (not _check_download(*check_files)): raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')
Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip. Args: urls (str): Url of files. directory (str): Directory to download to. check_files (list of str): Check if these files exist, ensuring the download succeeded. If these files exist before the download, the download is skipped. Raises: ValueError: Error if one of the ``check_files`` are not found following the download.
codesearchnet
def group_by_size(input_tensors, bytes_per_pack): if bytes_per_pack == 0: return [input_tensors] packs = [] last_pack_size = 0 for value in input_tensors: num_elements = value.shape.num_elements() if num_elements is None: logging.warning('not packing values due to the unknown or inconsistent shape of %s', value) return [input_tensors] size = num_elements * value.dtype.size if not packs or last_pack_size > bytes_per_pack: packs.append([]) last_pack_size = 0 packs[-1].append(value) last_pack_size += size return packs
Groups `input_tensors` into chunks of `bytes_per_pack`. The method preserves the original order of `input_tensors`. The grouping is best effort, each pack could have more or less bytes than `bytes_per_pack`. It only groups values with known shape. Args: input_tensors: a list of Tensor. bytes_per_pack: an integer. Returns: A list of packs of Tensor. All values are grouped into one pack if `bytes_per_pack` is zero or any of the value has unknown shape.
github-repos
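An illustrative re-run of the same greedy packing rule on plain byte counts instead of Tensors; this is a sketch, not the TensorFlow helper itself:

```python
def group_sizes_by_pack(sizes, bytes_per_pack):
    if bytes_per_pack == 0:
        return [sizes]
    packs, last_pack_size = [], 0
    for size in sizes:
        # Start a new pack once the previous one has already crossed the limit.
        if not packs or last_pack_size > bytes_per_pack:
            packs.append([])
            last_pack_size = 0
        packs[-1].append(size)
        last_pack_size += size
    return packs

print(group_sizes_by_pack([40, 40, 40, 40], bytes_per_pack=64))
# -> [[40, 40], [40, 40]]  (a pack may overshoot the limit by one value)
```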
def convert_aspect_ratios_to_ids(aspect_ratios: List[List[Tuple[int, int]]], max_image_tiles: int) -> np.ndarray: batch_size = len(aspect_ratios) max_num_images = max([len(row) for row in aspect_ratios]) supported_aspect_ratios = get_all_supported_aspect_ratios(max_image_tiles) aspect_ratios_ids = np.zeros((batch_size, max_num_images), dtype=np.int64) for i, sample_aspect_ratios in enumerate(aspect_ratios): for j, (num_tiles_h, num_tiles_w) in enumerate(sample_aspect_ratios): aspect_ratios_ids[i, j] = supported_aspect_ratios.index((num_tiles_h, num_tiles_w)) + 1 return aspect_ratios_ids
Convert aspect ratio tuples to unique ids. For batch padding we use 0, because there might be different number of images in each batch. The aspect ratio ids start from 1, with 1 corresponding to the first supported aspect ratio. Args: aspect_ratios (`List[List[Tuple[int, int]]]`): A list of aspect ratios for each image in the batch. max_image_tiles (`int`): The maximum number of tiles any image can be split into. Returns: `np.ndarray`: The aspect ratios ids as a numpy array with shape (batch_size, max_num_images). Each id corresponds to the index of the aspect ratio in the list of supported aspect ratios, offset by 1 (so 0 can be used for padding).
github-repos
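A small self-contained sketch of the same id mapping; the enumeration of supported ratios below (every (h, w) grid with h * w <= max_image_tiles) is an assumption standing in for `get_all_supported_aspect_ratios`:

```python
import numpy as np

def supported_aspect_ratios(max_image_tiles):
    # Assumed enumeration: every tile grid whose area fits the tile budget.
    return [(h, w) for h in range(1, max_image_tiles + 1)
            for w in range(1, max_image_tiles + 1) if h * w <= max_image_tiles]

def aspect_ratio_ids(aspect_ratios, max_image_tiles):
    supported = supported_aspect_ratios(max_image_tiles)
    max_num_images = max(len(row) for row in aspect_ratios)
    ids = np.zeros((len(aspect_ratios), max_num_images), dtype=np.int64)
    for i, sample in enumerate(aspect_ratios):
        for j, ratio in enumerate(sample):
            ids[i, j] = supported.index(ratio) + 1  # 0 stays reserved for padding
    return ids

print(aspect_ratio_ids([[(1, 1), (2, 2)], [(1, 2)]], max_image_tiles=4))
```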
def get_lib(): import tensorflow as tf return _os_path.join(_os_path.dirname(tf.__file__))
Get the directory containing the TensorFlow framework library. Returns: The directory as string.
github-repos
def List(self, device_path): connection = self.protocol_handler.Open(self._handle, destination=b'sync:') listing = self.filesync_handler.List(connection, device_path) connection.Close() return listing
Return a directory listing of the given path. Args: device_path: Directory to list.
juraj-google-style
def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]: input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) _, music_tokens, commit_losses, _ = self.bottleneck(latent_states) dequantised_states = [] for level in range(self.levels): decoder = self.decoders[level] dequantised_state = decoder(music_tokens[level:level + 1], all_levels=False) dequantised_states.append(dequantised_state.permute(0, 2, 1)) commit_loss = sum(commit_losses) loss = self.commit * commit_loss return (dequantised_states, loss)
Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level. The commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is computed. Args: raw_audio (`torch.FloatTensor`): Audio input which will be encoded and decoded. Returns: `Tuple[torch.Tensor, torch.Tensor]` Example: ```python >>> from transformers import JukeboxVQVAE, set_seed >>> import torch >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval() >>> set_seed(0) >>> zs = [torch.randint(100, (4, 1))] >>> model.decode(zs).shape torch.Size([4, 8, 1]) ```
github-repos
def make_multi_lagger(lags, groupby_kwargs=None): laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags] feature_union = FeatureUnion([ (repr(lagger), lagger) for lagger in laggers ]) return feature_union
Return a union of transformers that apply different lags Args: lags (Collection[int]): collection of lags to apply groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
juraj-google-style
def profile_df(df): return IPython.core.display.HTML(pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))
Generate a profile of data in a dataframe. Args: df: the Pandas dataframe.
codesearchnet
def GenerateLibSig(short_name): with _UTILITY_LOCK: utilities_used = ', '.join([utility for utility in sorted(_utility_registry)]) _utility_registry.Clear() if utilities_used: return (' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION, utilities_used)) else: return (' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION))
Generates a library signature suitable for a user agent field. Args: short_name: The short, product-specific string name for the library. Returns: A library signature string to append to user-supplied user-agent value.
codesearchnet
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult: known_args, pipeline_args = parse_known_args(argv) pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = save_main_session model_handler = VertexAIModelHandlerJSON(endpoint_id=known_args.endpoint, project=known_args.project, location=known_args.location, experiment=known_args.experiment, network=known_args.vpc_network, private=known_args.private) pipeline = test_pipeline if not test_pipeline: pipeline = beam.Pipeline(options=pipeline_options) read_glob = pipeline | 'Get glob' >> beam.Create([known_args.input]) read_image_name = read_glob | 'Get Image Paths' >> fileio.MatchAll() load_image = read_image_name | 'Read Image' >> beam.Map(lambda image_name: read_image(image_name.path)) preprocess = load_image | 'Preprocess Image' >> beam.MapTuple(lambda img_name, img: (img_name, preprocess_image(img))) predictions = preprocess | 'RunInference' >> RunInference(KeyedModelHandler(model_handler)) process_output = predictions | 'Process Predictions' >> beam.ParDo(PostProcessor()) _ = process_output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True) result = pipeline.run() result.wait_until_finish() return result
Args: argv: Command line arguments defined for this example. save_main_session: Used for internal testing. test_pipeline: Used for internal testing.
github-repos
def _generate_response(self, response: dict, request: dict) -> dict: response_template = deepcopy(self.response_template) response_template['sessionAttributes']['sessionId'] = request['session']['sessionId'] for key, value in response_template.items(): if key not in response.keys(): response[key] = value return response
Populates generated response with additional data conforming Alexa response specification. Args: response: Raw user input extracted from Alexa request. request: Alexa request. Returns: response: Response conforming Alexa response specification.
juraj-google-style
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids('.')] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] if self.padding_side == 'right': return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1] else: return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]
Create the token type IDs corresponding to the sequences passed. [What are token type IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of building those. Args: token_ids_0 (`List[int]`): The first tokenized sequence. token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. Returns: `List[int]`: The token type ids.
github-repos
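A toy illustration of the resulting segment layout for a right-padded sequence pair, using token counts only; the single-token specials and the two-token question suffix are assumptions:

```python
def toy_token_type_ids(len_a, len_b=None, question_suffix_len=2):
    # [CLS] + a + question suffix + [SEP] -> segment 0; b + [SEP] -> segment 1
    if len_b is None:
        return [0] * (1 + len_a + 1)
    return [0] * (1 + len_a + question_suffix_len + 1) + [1] * (len_b + 1)

print(toy_token_type_ids(3, 2))  # -> [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
```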
def read_folder(directory): res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
Read the text files in a directory and return their contents as an array. Args: directory: where the text files are Returns: Array of text
juraj-google-style
class VptqConfig(QuantizationConfigMixin): def __init__(self, enable_proxy_error: bool=False, config_for_layers: Dict[str, Any]={}, shared_layer_config: Dict[str, Any]={}, modules_to_not_convert: Optional[List]=None, **kwargs): self.quant_method = QuantizationMethod.VPTQ self.enable_proxy_error = enable_proxy_error self.config_for_layers: Dict[str, Any] = config_for_layers self.shared_layer_config: Dict[str, Any] = shared_layer_config self.modules_to_not_convert = modules_to_not_convert self.post_init() def post_init(self): for layer_name, layer_param in self.config_for_layers.items(): VptqLayerConfig(**layer_param) if self.enable_proxy_error is True: raise ValueError('enable_proxy_error should always be False until we support training')
This is a wrapper class about `vptq` parameters. Args: enable_proxy_error (`bool`, *optional*, defaults to `False`): calculate proxy error for each layer config_for_layers (`Dict`, *optional*, defaults to `{}`): quantization params for each layer shared_layer_config (`Dict`, *optional*, defaults to `{}`): shared quantization params among layers modules_to_not_convert (`list`, *optional*, defaults to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object.
github-repos
def set_Tc(self, Tc, T=None): if isinstance(Tc, Iterable): if (len(Tc) == len(T)): x = np.concatenate(([(- ttconf.BIG_NUMBER)], T, [ttconf.BIG_NUMBER])) y = np.concatenate(([Tc[0]], Tc, [Tc[(- 1)]])) self.Tc = interp1d(x, y) else: self.logger('need Tc values and Timepoints of equal length', 2, warn=True) self.Tc = interp1d([(- ttconf.BIG_NUMBER), ttconf.BIG_NUMBER], [1e-05, 1e-05]) else: self.Tc = interp1d([(- ttconf.BIG_NUMBER), ttconf.BIG_NUMBER], [(Tc + ttconf.TINY_NUMBER), (Tc + ttconf.TINY_NUMBER)]) self.calc_integral_merger_rate()
initialize the merger model with a coalescent time Args: - Tc: a float or an iterable, if iterable another argument T of same shape is required - T: an array like of same shape as Tc that specifies the time pivots corresponding to Tc Returns: - None
codesearchnet
def post_transform(self, args): args = args[1:] if args and args[0] == 'az' else args post_transform_commands = [] for i, arg in enumerate(args): if is_alias_command(['create'], args) and i > 0 and args[i - 1] in ['-c', '--command']: post_transform_commands.append(arg) else: post_transform_commands.append(os.path.expandvars(arg)) AliasManager.write_alias_config_hash(self.alias_config_hash) AliasManager.write_collided_alias(self.collided_alias) return post_transform_commands
Inject environment variables, and write hash to alias hash file after transforming alias to commands. Args: args: A list of args to post-transform.
juraj-google-style
def readinto(self, b): if not self._readable: raise UnsupportedOperation('read') with self._seek_lock: seek = self._seek queue = self._read_queue if seek == 0: self._preload_range() size = len(b) if size: b_view = memoryview(b) size_left = size else: b_view = b size_left = -1 b_end = 0 buffer_size = self._buffer_size while size_left > 0 or size_left == -1: start = seek % buffer_size queue_index = seek - start try: buffer = queue[queue_index] except KeyError: break with handle_os_exceptions(): try: queue[queue_index] = buffer = buffer.result() except AttributeError: pass buffer_view = memoryview(buffer) data_size = len(buffer) if not data_size: break if size_left != -1: end = start + size_left else: end = data_size - start if end >= data_size: end = data_size del queue[queue_index] index = queue_index + buffer_size * self._max_buffers if index < self._size: queue[index] = self._workers.submit( self._read_range, index, index + buffer_size) read_size = end - start if size_left != -1: size_left -= read_size seek += read_size b_start = b_end b_end = b_start + read_size b_view[b_start:b_end] = buffer_view[start:end] self._seek = seek self._raw.seek(seek) return b_end
Read bytes into a pre-allocated, writable bytes-like object b, and return the number of bytes read. Args: b (bytes-like object): buffer. Returns: int: number of bytes read
juraj-google-style
def _get_jwt_for_audience(self, audience): (token, expiry) = self._cache.get(audience, (None, None)) if ((token is None) or (expiry < _helpers.utcnow())): (token, expiry) = self._make_jwt_for_audience(audience) self._cache[audience] = (token, expiry) return token
Get a JWT For a given audience. If there is already an existing, non-expired token in the cache for the audience, that token is used. Otherwise, a new token will be created. Args: audience (str): The intended audience. Returns: bytes: The encoded JWT.
codesearchnet
def transform_array_to_list(array): if ((array.dtype.kind in ('u', 'i', 'f')) and (~ np.isfinite(array)).any()): transformed = array.astype('object') transformed[np.isnan(array)] = 'NaN' transformed[np.isposinf(array)] = 'Infinity' transformed[np.isneginf(array)] = '-Infinity' return transformed.tolist() elif ((array.dtype.kind == 'O') and pd and pd.isnull(array).any()): transformed = array.astype('object') transformed[pd.isnull(array)] = 'NaN' return transformed.tolist() return array.tolist()
Transforms a NumPy array into a list of values Args: array (np.ndarray): the NumPy array to transform Returns: list
codesearchnet
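Expected behaviour on a float array with non-finite entries, assuming `transform_array_to_list` above is in scope; non-finite values come back as JSON-friendly strings:

```python
import numpy as np

arr = np.array([1.5, np.nan, np.inf, -np.inf])
print(transform_array_to_list(arr))
# -> [1.5, 'NaN', 'Infinity', '-Infinity']
```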
def interp_color(a, b, f): a_ = (a.redF(), a.greenF(), a.blueF()) b_ = (b.redF(), b.greenF(), b.blueF()) a_ = [(x * (1 - f)) for x in a_] b_ = [(x * f) for x in b_] c = [(x + y) for (x, y) in zip(a_, b_)] return QtGui.QColor.fromRgbF(*c)
Interpolate between two colors. Returns: `QColor` object.
codesearchnet
def return_handler(self, call_node, function_nodes, saved_function_call_index, first_node): if any((isinstance(node, YieldNode) for node in function_nodes)): rhs_prefix = 'yld_' elif any((isinstance(node, ConnectToExitNode) for node in function_nodes)): rhs_prefix = 'ret_' else: return LHS = ((CALL_IDENTIFIER + 'call_') + str(saved_function_call_index)) RHS = (rhs_prefix + get_call_names_as_string(call_node.func)) return_node = RestoreNode(((LHS + ' = ') + RHS), LHS, [RHS], line_number=call_node.lineno, path=self.filenames[(- 1)]) return_node.first_node = first_node self.nodes[(- 1)].connect(return_node) self.nodes.append(return_node)
Handle the return from a function during a function call. Args: call_node(ast.Call) : The node that calls the definition. function_nodes(list[Node]): List of nodes of the function being called. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or RestoreNode): Used to connect previous statements to this function.
codesearchnet
def add(self, files): if (files.__class__.__name__ == 'str'): self._files.append(files) else: self._files.extend(files)
Adds files to check. Args: files: List of files to check.
codesearchnet
def FromDictionary(cls, msg_dict): level = msg_dict.get('level') msg = msg_dict.get('message') now = msg_dict.get('now_time') created = msg_dict.get('created_time') count = msg_dict.get('count', 1) msg_id = msg_dict.get('id', 0) new_msg = ServiceMessage(level, msg, msg_id, created, now) if (count > 1): new_msg.count = count return new_msg
Create from a dictionary with kv pairs. Args: msg_dict (dict): A dictionary with information as created by to_dict() Returns: ServiceMessage: the converted message
codesearchnet
def check(self): if (not isinstance(self.parsed_yaml, dict)): msg = 'In {0}:\n'.format(self.sourcefile) msg += 'Assistants and snippets must be Yaml mappings, not "{0}"!'.format(self.parsed_yaml) raise exceptions.YamlTypeError(msg) self._check_fullname(self.sourcefile) self._check_description(self.sourcefile) self._check_section_names(self.sourcefile) self._check_project_type(self.sourcefile) self._check_args(self.sourcefile) self._check_files(self.sourcefile) self._check_dependencies(self.sourcefile) self._check_run(self.sourcefile)
Checks whether loaded yaml is well-formed according to syntax defined for version 0.9.0 and later. Raises: YamlError: (containing a meaningful message) when the loaded Yaml is not well formed
codesearchnet
def stack(xs, dim_name, axis=0, name=None): ret = StackOperation(xs, dim_name, axis, name).outputs[0] return ret
Stack multiple Tensors to make a new dimension. Args: xs: a list of Tensors with identical shapes. dim_name: a string (name of the new dimension) axis: an integer (index of the new dimension in the output shape) name: an optional string Returns: a Tensor
juraj-google-style
def _send(self, email_message): pre_send.send(self.__class__, message=email_message) if (not email_message.recipients()): return False from_email = sanitize_address(email_message.from_email, email_message.encoding) recipients = [sanitize_address(addr, email_message.encoding) for addr in email_message.recipients()] message = email_message.message().as_bytes(linesep='\r\n') try: result = self.conn.send_raw_email(Source=from_email, Destinations=recipients, RawMessage={'Data': message}) message_id = result['MessageId'] post_send.send(self.__class__, message=email_message, message_id=message_id) except ClientError: if (not self.fail_silently): raise return False return True
Sends an individual message via the Amazon SES HTTP API. Args: email_message: A single Django EmailMessage object. Returns: True if the EmailMessage was sent successfully, otherwise False. Raises: ClientError: An interaction with the Amazon SES HTTP API failed.
codesearchnet
def __init__(self, location, resource_pool): super(FileSystemPackageRepository, self).__init__(location, resource_pool) global _settings _settings = config.plugins.package_repository.filesystem self.register_resource(FileSystemPackageFamilyResource) self.register_resource(FileSystemPackageResource) self.register_resource(FileSystemVariantResource) self.register_resource(FileSystemCombinedPackageFamilyResource) self.register_resource(FileSystemCombinedPackageResource) self.register_resource(FileSystemCombinedVariantResource) self.get_families = lru_cache(maxsize=None)(self._get_families) self.get_family = lru_cache(maxsize=None)(self._get_family) self.get_packages = lru_cache(maxsize=None)(self._get_packages) self.get_variants = lru_cache(maxsize=None)(self._get_variants) self.get_file = lru_cache(maxsize=None)(self._get_file)
Create a filesystem package repository. Args: location (str): Path containing the package repository.
juraj-google-style
def extraterrestrial_direct_normal_radiation(self, value=9999.0): if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `extraterrestrial_direct_normal_radiation`'.format(value)) if value < 0.0: raise ValueError( 'value need to be greater or equal 0.0 ' 'for field `extraterrestrial_direct_normal_radiation`') self._extraterrestrial_direct_normal_radiation = value
Corresponds to IDD Field `extraterrestrial_direct_normal_radiation` Args: value (float): value for IDD Field `extraterrestrial_direct_normal_radiation` Unit: Wh/m2 value >= 0.0 Missing value: 9999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def add(self, information, timeout=(- 1)): return self._client.create(information, timeout=timeout)
Adds a data center resource based upon the attributes specified. Args: information: Data center information timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Added data center.
codesearchnet
def SetLines(self, lines): (self._cli_lines, self._cli_cols) = TerminalSize() if lines: self._cli_lines = int(lines)
Set number of screen lines. Args: lines: An int, number of lines. If None, use terminal dimensions. Raises: ValueError, TypeError: Not a valid integer representation.
juraj-google-style
def calc_padding(fmt, align): remain = (struct.calcsize(fmt) % align) if (remain == 0): return '' return ('x' * (align - remain))
Calculate how many padding bytes needed for ``fmt`` to be aligned to ``align``. Args: fmt (str): :mod:`struct` format. align (int): alignment (2, 4, 8, etc.) Returns: str: padding format (e.g., various number of 'x'). >>> calc_padding('b', 2) 'x' >>> calc_padding('b', 3) 'xx'
codesearchnet
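A typical use is building an explicitly padded struct format; a sketch assuming `calc_padding` above is in scope:

```python
import struct

fmt = "b"                    # 1-byte field
fmt += calc_padding(fmt, 4)  # pad to a 4-byte boundary -> adds "xxx"
fmt += "i"                   # the 4-byte int now starts aligned

print(fmt, struct.calcsize(fmt))  # bxxxi 8
```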
def run(self, dag): for node in dag.threeQ_or_more_gates(): rule = node.op.definition if (not rule): raise QiskitError(('Cannot unroll all 3q or more gates. No rule to expand instruction %s.' % node.op.name)) decomposition = DAGCircuit() decomposition.add_qreg(rule[0][1][0][0]) for inst in rule: decomposition.apply_operation_back(*inst) decomposition = self.run(decomposition) dag.substitute_node_with_dag(node, decomposition) return dag
Expand 3+ qubit gates using their decomposition rules. Args: dag(DAGCircuit): input dag Returns: DAGCircuit: output dag with maximum node degrees of 2 Raises: QiskitError: if a 3q+ gate is not decomposable
codesearchnet
def write(self, auth, resource, value, options={}, defer=False): return self._call('write', auth, [resource, value, options], defer)
Writes a single value to the resource specified. Args: auth: cik for authentication. resource: resource to write to. value: value to write options: options.
juraj-google-style
class BatchFeature(BaseBatchFeature):
Holds the output of the image processor specific `__call__` methods. This class is derived from a python dictionary and can be used as a dictionary. Args: data (`dict`): Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.). tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.
github-repos
def _force_edge_active(self, seqs: List[List[GridQubit]], edge: EDGE, sample_bool: Callable[[], bool] ) -> List[List[GridQubit]]: n0, n1 = edge seqs = list(seqs) i0, j0 = index_2d(seqs, n0) i1, j1 = index_2d(seqs, n1) s0 = seqs[i0] s1 = seqs[i1] if i0 != i1: part = [s0[:j0], s0[j0 + 1:]], [s1[:j1], s1[j1 + 1:]] del seqs[max(i0, i1)] del seqs[min(i0, i1)] c0 = 0 if not part[0][1] else 1 if not part[0][ 0] else sample_bool() if c0: part[0][c0].reverse() c1 = 0 if not part[1][1] else 1 if not part[1][ 0] else sample_bool() if not c1: part[1][c1].reverse() seqs.append(part[0][c0] + [n0, n1] + part[1][c1]) other = [1, 0] seqs.append(part[0][other[c0]]) seqs.append(part[1][other[c1]]) else: if j0 > j1: j0, j1 = j1, j0 n0, n1 = n1, n0 head = s0[:j0] inner = s0[j0 + 1:j1] tail = s0[j1 + 1:] del seqs[i0] if sample_bool(): if sample_bool(): seqs.append(inner + [n1, n0] + head[::-1]) seqs.append(tail) else: seqs.append(tail[::-1] + [n1, n0] + inner) seqs.append(head) else: seqs.append(head + [n0, n1] + tail) seqs.append(inner) return [e for e in seqs if e]
Move which forces given edge to appear on some sequence. Args: seqs: List of linear sequences covering chip. edge: Edge to be activated. sample_bool: Callable returning random bool. Returns: New list of linear sequences with given edge on some of the sequences.
juraj-google-style
def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3): gradients = make_gradients(dims) return (np.sin((((gradients[0] - 0.5) * repeat) * np.pi)) * np.sin((((gradients[1] - 0.5) * repeat) * np.pi)))
Makes a surface from the product of sine functions on each axis. Args: dims (pair): the dimensions of the surface to create repeat (int): the frequency of the waves is set to ensure this many repetitions of the function Returns: surface: A surface.
codesearchnet
def starts_with_prefix_in_list(text, prefixes): for prefix in prefixes: if text.startswith(prefix): return True return False
Return True if the given string starts with one of the prefixes in the given list, otherwise return False. Arguments: text (str): Text to check for prefixes. prefixes (list): List of prefixes to check for. Returns: bool: True if the given text starts with any of the given prefixes, otherwise False.
codesearchnet
def wait_for_registration(self, processor_type): with self._condition: self._condition.wait_for(lambda: ( processor_type in self or self._cancelled_event.is_set())) if self._cancelled_event.is_set(): raise WaitCancelledException()
Waits for a particular processor type to register or until is_cancelled is True. is_cancelled cannot be part of this class since we aren't cancelling all waiting for a processor_type, but just this particular wait. Args: processor_type (ProcessorType): The family, and version of the transaction processor. Returns: None
juraj-google-style
def list_group_maintainers(self, name): self.project_service.set_auth(self._token_project) return self.project_service.list_group_maintainers(name)
Get the maintainers of a group. Args: name (string): Name of group to query. Returns: (list[string]): List of maintainer names. Raises: requests.HTTPError on failure.
juraj-google-style
def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'): total_pages = int(math.ceil(record_count / limit)) next_cond = limit + offset <= record_count prev_cond = offset >= limit next_page = base_uri + page_nav_tpl.format(limit, offset + limit) if next_cond else None prev_page = base_uri + page_nav_tpl.format(limit, offset - limit) if prev_cond else None return OrderedDict([ ('total_count', record_count), ('total_pages', total_pages), ('next_page', next_page), ('prev_page', prev_page) ])
Compute pagination info for collection filtering. Args: limit (int): Collection filter limit. offset (int): Collection filter offset. record_count (int): Collection filter total record count. base_uri (str): Collection filter base uri (without limit, offset) page_nav_tpl (str): Pagination template. Returns: A mapping of pagination info.
juraj-google-style
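A quick call showing the shape of the result, assuming `paginator` above is importable:

```python
info = paginator(limit=10, offset=20, record_count=45, base_uri="/items?")
print(info["total_pages"])  # 5
print(info["next_page"])    # /items?&limit=10&offset=30
print(info["prev_page"])    # /items?&limit=10&offset=10
```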
def list_hierarchy(class_name, bases): class_list = [Uri(class_name)] for base in bases: if (base.__name__ not in IGNORE_CLASSES): class_list.append(Uri(base.__name__)) return list([i for i in set(class_list)])
Creates a list of the class hierarchy Args: class_name: name of the current class bases: list/tuple of bases for the current class
codesearchnet
def annotate(self, gpl, annotation_column, gpl_on='ID', gsm_on='ID_REF', in_place=False): if isinstance(gpl, GPL): annotation_table = gpl.table elif isinstance(gpl, DataFrame): annotation_table = gpl else: raise TypeError('gpl should be a GPL object or a pandas.DataFrame') annotated = self.table.merge(annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on) del annotated[gpl_on] if in_place: self.table = annotated return None else: return annotated
Annotate GSM with provided GPL Args: gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with annotation_column (str`): Column in a table for annotation gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID". gsm_on (:obj:`str`): Use this column in GPL to merge. Defaults to "ID_REF". in_place (:obj:`bool`): Substitute table in GSM by new annotated table. Defaults to False. Returns: :obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None Raises: TypeError: GPL should be GPL or pandas.DataFrame
codesearchnet
def getall(self, key, default=[]): return self.data[key] if key in self.data else default
Return the list of all values for the specified key. Arguments: key (object): Key default (list): Default value to return if the key does not exist, defaults to ``[]``, i.e. an empty list. Returns: list: List of all values for the specified key if the key exists, ``default`` otherwise.
juraj-google-style
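A minimal stand-in container showing the intended behaviour; the class name and the `self.data` layout (key -> list of values) are assumptions:

```python
class MultiValueDict:
    def __init__(self, data):
        self.data = data  # mapping: key -> list of values

    def getall(self, key, default=[]):
        return self.data[key] if key in self.data else default

md = MultiValueDict({"tag": ["a", "b"]})
print(md.getall("tag"))      # ['a', 'b']
print(md.getall("missing"))  # []
```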
def __init__(self, vendor=None, body=b''): super().__init__() self.vendor = vendor self.body = body
Create instance attributes. Args: vendor (int): 32-bit vendor ID. body (bytes): Vendor-defined body
juraj-google-style
def __init__(self, paginator, number, items): self.paginator = paginator self.number = number self.number0 = number - 1 self.items = items self.count = len(items) self.total_items = paginator.total_items self.items_per_page = paginator.items_per_page self.total_pages = paginator.total_pages self.page_range = paginator.page_range self.start_page = paginator.start_page self.last_page = paginator.last_page
Constructor. Args: paginator: The parent paginator object. number: The number of this page (starting from 1). items: A list of items to belong to this page.
juraj-google-style
def authorize(self, scheme, **params): if (scheme not in self.schemes): return False for (field, value) in iteritems(params): setattr(self, field, value) if ((field in self.schemes[scheme][u'params'].keys()) and value): self.schemes[scheme][u'params'][field] = value return True
Store credentials required to satisfy a given auth scheme. Args: scheme (str): The name of the Authentication scheme. **params: parameters for the specified scheme. Returns: True if parameters are set successfully (note that this doesn't mean the credentials are valid) False if the scheme specified is not supported
codesearchnet
def add_site(self, site): start_angle = 0 radius = 0 total_occu = 0 for specie, occu in site.species.items(): radius += occu * (specie.ionic_radius if isinstance(specie, Specie) and specie.ionic_radius else specie.average_ionic_radius) total_occu += occu vis_radius = 0.2 + 0.002 * radius for specie, occu in site.species.items(): if not specie: color = (1, 1, 1) elif specie.symbol in self.el_color_mapping: color = [i / 255 for i in self.el_color_mapping[specie.symbol]] mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, start_angle + 360 * occu) self.mapper_map[mapper] = [site] start_angle += 360 * occu if total_occu < 1: mapper = self.add_partial_sphere(site.coords, vis_radius, (1,1,1), start_angle, start_angle + 360 * (1 - total_occu)) self.mapper_map[mapper] = [site]
Add a site to the render window. The site is displayed as a sphere, the color of which is determined based on the element. Partially occupied sites are displayed as a single element color, though the site info still shows the partial occupancy. Args: site: Site to add.
juraj-google-style
def info_gen(self, code, message, compressed=False): if ('COMPRESS=GZIP' in message): return self.__info_gzip_gen() if compressed: return self.__info_yenczlib_gen() return self.__info_plain_gen()
Dispatcher for the info generators. Determines which __info_*_gen() should be used based on the supplied parameters. Args: code: The status code for the command response. message: The status message for the command response. compressed: Force decompression. Useful for xz* commands. Returns: An info generator.
codesearchnet
def fsync(self, file_des): if 0 <= file_des < NR_STD_STREAMS: self.filesystem.raise_os_error(errno.EINVAL) file_object = self.filesystem.get_open_file(file_des) if self.filesystem.is_windows_fs: if (not hasattr(file_object, 'allow_update') or not file_object.allow_update): self.filesystem.raise_os_error( errno.EBADF, file_object.file_path)
Perform fsync for a fake file (in other words, do nothing). Args: file_des: The file descriptor of the open file. Raises: OSError: file_des is an invalid file descriptor. TypeError: file_des is not an integer.
juraj-google-style
def __call__(self, user_lo_config): lo_config = {} q_los = self.get_qubit_los(user_lo_config) if q_los: lo_config['qubit_lo_freq'] = q_los m_los = self.get_meas_los(user_lo_config) if m_los: lo_config['meas_lo_freq'] = m_los return self.qobj_model(**lo_config)
Return PulseQobjExperimentConfig Args: user_lo_config (LoConfig): A dictionary of LOs to format. Returns: PulseQobjExperimentConfig: qobj.
juraj-google-style
def tersoff_potential(self, structure):
    bv = BVAnalyzer()
    el = [site.specie.symbol for site in structure]
    valences = bv.get_valences(structure)
    el_val_dict = dict(zip(el, valences))
    gin = 'species \n'
    qerfstring = 'qerfc\n'
    for key in el_val_dict.keys():
        if (key != 'O') and ((el_val_dict[key] % 1) != 0):
            raise SystemError('Oxide has mixed valence on metal')
        specie_string = (key + ' core ') + str(el_val_dict[key]) + '\n'
        gin += specie_string
        qerfstring += (key + ' ') + key + ' 0.6000 10.0000 \n'
    gin += ''  # NOTE: the header string appended here is truncated in the source
    met_oxi_ters = TersoffPotential().data
    for key in el_val_dict.keys():
        if key != 'O':
            metal = (key + '(') + str(int(el_val_dict[key])) + ')'
            ters_pot_str = met_oxi_ters[metal]
            gin += ters_pot_str
    gin += qerfstring
    return gin
Generate the species, tersoff potential lines for an oxide structure Args: structure: pymatgen.core.structure.Structure
codesearchnet
def __init__( self, initial_site ): Atom.atom_number += 1 self.number = Atom.atom_number self._site = initial_site if self._site.occupation == 0: self._site.occupation = self.number self._site.is_occupied = True self._site.atom = self else: raise ValueError( "This site is already occupied by atom {}".format( initial_site.occupation ) ) self.reset()
Initialise an Atom instance. Args: initial_site (Site): Lattice site initially occupied by this Atom. Returns: None
juraj-google-style
def ed25519_public_key_to_string(key): return base64.b64encode(key.public_bytes(encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw), None).decode('utf-8')
Convert an ed25519 public key to a base64-encoded string. Args: key (Ed25519PublicKey): the key to write to the file. Returns: str: the key representation as a str
codesearchnet
def split(self, path): return os.path.split(os.path.abspath(path))
Splits the given path into two parts. Splits the path into a pair (head, tail) such that tail contains the last component of the path and head contains everything up to that. Args: path: path as a string Returns: a pair of path components as strings.
github-repos
def raise_on_errors(errors, level=logging.CRITICAL): if errors: log.log(level, '\n'.join(errors)) raise CoTError('\n'.join(errors))
Raise a CoTError if errors. Helper function because I had this code block everywhere. Args: errors (list): the list of error messages level (int, optional): the log level to use. Defaults to logging.CRITICAL Raises: CoTError: if errors is non-empty
codesearchnet
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None): LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message) slack = slacker.Slacker(SLACK_TOKEN) try: slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji) LOG.info('Message posted to %s', channel) except slacker.Error: LOG.info('error posted message to %s', channel)
Format the message and post to the appropriate slack channel. Args: message (str): Message to post to slack channel (str): Desired channel. Must start with #
codesearchnet
def ParseFileObject(self, parser_mediator, file_object): filename = parser_mediator.GetFilename() file_size = file_object.get_size() if (file_size <= 0): raise errors.UnableToParseFile('File size: {0:d} bytes is less equal 0.'.format(file_size)) if (file_size > 50000000): raise errors.UnableToParseFile('File size: {0:d} bytes is larger than 50 MB.'.format(file_size)) top_level_object = self.GetTopLevel(file_object) if (not top_level_object): raise errors.UnableToParseFile('Unable to parse: {0:s} skipping.'.format(filename)) matching_plugin = None for plugin in self._plugins: try: plugin.UpdateChainAndProcess(parser_mediator, plist_name=filename, top_level=top_level_object) matching_plugin = plugin except errors.WrongPlistPlugin as exception: logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(exception.args[0], exception.args[1])) if ((not matching_plugin) and self._default_plugin): self._default_plugin.UpdateChainAndProcess(parser_mediator, plist_name=filename, top_level=top_level_object)
Parses a plist file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def pnl_upsert(self, asset_manager_id, pnls): self.logger.info('Upsert PnL for - Asset Manager: %s', asset_manager_id) pnls = [pnls] if not isinstance(pnls, list) else pnls json_pnls = [pnl.to_interface() for pnl in pnls] url = '%s/pnls/%s' % (self.endpoint, asset_manager_id) response = self.session.put(url, json=json_pnls) if response.ok: results = [] for pnl_result in response.json(): results.append(json_to_pnl(pnl_result)) self.logger.info('Upserted %s PnL records', len(results)) return results else: self.logger.error(response.text) response.raise_for_status()
Upsert a list of pnls. Note: this performs a full update of existing records with matching keys, so the passed in pnl objects should be complete. Args: asset_manager_id (int): the id of the asset manager owning the pnl pnls (list): list of pnl objects to upsert
juraj-google-style
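A hedged calling sketch; `api` stands for an instance of this interface and the Pnl objects are assumed to be built elsewhere with the AMaaS model classes:

# `pnls` holds complete Pnl objects, since the upsert fully replaces matching records.
results = api.pnl_upsert(asset_manager_id=1234, pnls=pnls)
print('Upserted', len(results), 'PnL records')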
def HasDataStream(self, name, case_sensitive=True):
    if not isinstance(name, py2to3.STRING_TYPES):
        raise ValueError('Name is not a string.')

    name_lower = name.lower()
    for data_stream in self._GetDataStreams():
        if data_stream.name == name:
            return True
        if not case_sensitive and data_stream.name.lower() == name_lower:
            return True

    return False
Determines if the file entry has a specific data stream. Args: name (str): name of the data stream. case_sensitive (Optional[bool]): True if the name is case sensitive. Returns: bool: True if the file entry has the data stream. Raises: ValueError: if the name is not a string.
codesearchnet
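A short usage sketch; `file_entry` is assumed to have been resolved beforehand from a dfVFS path specification:

# Case-insensitive check for an NTFS alternate data stream.
if file_entry.HasDataStream('zone.identifier', case_sensitive=False):
    print('Alternate data stream present.')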
def get_firmware(self):
    uri = '{}/firmware'.format(self.data['uri'])
    return self._helper.do_get(uri)
Get the firmware inventory of a server. Note: This method is available for API version 300 or later. Returns: dict: Server Hardware firmware.
codesearchnet
def stop(name, file=sys.stderr):
    if is_enabled():
        elapsed = time() - __TIMERS[name]
        if elapsed > 60:
            elapsed_str = '{:.1f} m'.format(elapsed / 60)
        elif elapsed > 1:
            elapsed_str = '{:.1f} s'.format(elapsed)
        else:
            elapsed_str = '{:.1f} ms'.format(elapsed * 1000)
        del __TIMERS[name]
        print('[prof]', name, elapsed_str, file=file)
    return is_enabled()
Stop a profiling timer. Arguments: name (str): The name of the timer to stop. If no name is given, stop the global anonymous timer. Returns: bool: Whether or not profiling is enabled. Raises: KeyError: If the named timer does not exist.
codesearchnet
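A usage sketch assuming the module pairs this with a start(name) helper that records the entry in __TIMERS; the profiled work is a placeholder:

start('feature_extraction')           # hypothetical companion call that begins the timer
run_expensive_feature_extraction()    # placeholder for the work being measured
stop('feature_extraction')            # prints e.g. "[prof] feature_extraction 3.2 s" when profiling is enabled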
def _ReadAttributeValueDateTime(
        self, attribute_values_data, record_offset, attribute_values_data_offset,
        attribute_value_offset):
    if attribute_value_offset == 0:
        return None

    data_type_map = self._GetDataTypeMap('keychain_date_time')

    file_offset = (
        record_offset + attribute_values_data_offset + attribute_value_offset)

    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]

    try:
        date_time_attribute_value = self._ReadStructureFromByteStream(
            attribute_value_data, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to map date time attribute value data at offset: 0x{0:08x} '
            'with error: {1!s}'.format(file_offset, exception))

    return date_time_attribute_value.date_time.rstrip('\x00')
Reads a date time attribute value. Args: attribute_values_data (bytes): attribute values data. record_offset (int): offset of the record relative to the start of the file. attribute_values_data_offset (int): offset of the attribute values data relative to the start of the record. attribute_value_offset (int): offset of the attribute relative to the start of the record. Returns: str: date and time values. Raises: ParseError: if the attribute value cannot be read.
codesearchnet
def get_current_state(self, clearConfig: bool = False):
    json_state = self.download_configuration()
    if "errorCode" in json_state:
        LOGGER.error(
            "Could not get the current configuration. Error: %s",
            json_state["errorCode"],
        )
        return False

    if clearConfig:
        self.devices = []
        self.clients = []
        self.groups = []
        self.rules = []
        self.functionalHomes = []

    js_home = json_state["home"]
    self.from_json(js_home)
    self._get_devices(json_state)
    self._get_clients(json_state)
    self._get_groups(json_state)
    self._get_functionalHomes(js_home)
    self._load_functionalChannels()
    return True
Downloads the current configuration and parses it into self. Args: clearConfig(bool): if set to True, this function will remove all old objects from self.devices, self.clients, ... to have a fresh config instead of reparsing them
juraj-google-style
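A hedged sketch of refreshing the configuration; the Home class location, the token, and the access-point id are assumptions about the surrounding homematicip client:

from homematicip.home import Home  # assumption about where this method lives

home = Home()
home.set_auth_token('YOUR_AUTH_TOKEN')        # hypothetical credentials
home.init('YOUR_ACCESS_POINT_ID')
if home.get_current_state(clearConfig=True):  # drop stale objects and reparse from scratch
    print(len(home.devices), 'devices loaded')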
def GetBalance(self, wallet, address, as_string=False):
    addr = PromptUtils.parse_param(address, wallet)
    if isinstance(addr, UInt160):
        addr = addr.Data
    sb = ScriptBuilder()
    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr])

    tx, fee, results, num_ops, engine_success = test_invoke(sb.ToArray(), wallet, [])
    if engine_success:
        try:
            val = results[0].GetBigInteger()
            precision_divisor = pow(10, self.decimals)
            balance = Decimal(val) / Decimal(precision_divisor)
            if as_string:
                formatter_str = '.%sf' % self.decimals
                balance_str = format(balance, formatter_str)
                return balance_str
            return balance
        except Exception as e:
            logger.error("could not get balance: %s " % e)
            traceback.print_stack()
    else:
        addr_str = Crypto.ToAddress(UInt160(data=addr))
        logger.error(
            f"Could not get balance of address {addr_str} for token contract {self.ScriptHash}. VM execution failed. Make sure the contract exists on the network and that it adheres to the NEP-5 standard")

    return 0
Get the token balance. Args: wallet (neo.Wallets.Wallet): a wallet instance. address (str): public address of the account to get the token balance of. as_string (bool): whether the return value should be a string. Default is False, returning an integer. Returns: int/str: token balance value as int (default), token balance as string if `as_string` is set to True. 0 if balance retrieval failed.
juraj-google-style
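A hedged usage sketch; `wallet` stands for an open neo-python wallet, `token` for an NEP-5 token instance exposing this method, and the address is a placeholder:

balance_str = token.GetBalance(wallet, 'AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN2oNTc3', as_string=True)
print('NEP-5 balance:', balance_str)  # formatted with the token's own decimal precision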
def SetStorageWriter(self, storage_writer):
    self._storage_writer = storage_writer

    self._last_event_data_hash = None
    self._last_event_data_identifier = None
Sets the storage writer. Args: storage_writer (StorageWriter): storage writer.
codesearchnet
def read_reply(self) -> Reply:
    _logger.debug('Read reply')

    reply = Reply()

    while True:
        line = yield from self._connection.readline()

        if line[-1:] != b'\n':
            raise NetworkError('Connection closed.')

        self._data_event_dispatcher.notify_read(line)
        reply.parse(line)

        if reply.code is not None:
            break

    return reply
Read a reply from the stream. Returns: .ftp.request.Reply: The reply. Coroutine.
codesearchnet
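A usage sketch in the same yield-from coroutine style as the method itself; `client` stands for the object exposing read_reply():

import asyncio

@asyncio.coroutine
def fetch_greeting(client):
    reply = yield from client.read_reply()  # e.g. the 220 welcome line from an FTP server
    print(reply.code)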
def ChunkedAttentionSelector(x, params, selector=None, **kwargs):
    del params, kwargs
    selector = selector or (lambda x: [] if x < 1 else [x - 1])
    triples, masks = zip(*x)
    queries, keys, values = zip(*triples)
    result = []
    for i in range(len(x)):
        selected = selector(i)
        new_key_list = [keys[j] for j in selected]
        new_key = np.concatenate(new_key_list + [keys[i]], axis=1)
        new_value = np.concatenate(
            [values[j] for j in selected] + [values[i]], axis=1)
        new_mask_shapes = [
            (1, queries[i].shape[1], key.shape[1]) for key in new_key_list]
        cur_mask = masks[i]
        new_mask_list = [np.ones(s, dtype=cur_mask.dtype) for s in new_mask_shapes]
        new_mask = np.concatenate(new_mask_list + [cur_mask], axis=2)
        result.append(((queries[i], new_key, new_value), new_mask))
    return tuple(result)
Select which chunks to attend to in chunked attention. Args: x: inputs, a list of elements of the form (q, k, v), mask for each chunk. params: parameters (unused). selector: a function from chunk_number -> list of chunk numbers that says which other chunks should be appended to the given one (previous if None). **kwargs: unused other arguments. Returns: a list of elements of the form (q, k', v'), mask' where k', v' and mask' are concatenations of k, v and identity-extended masks from selected chunks.
codesearchnet
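A small numpy sketch of the default behaviour (each chunk also attends to the previous one); the shapes are illustrative:

import numpy as np

q = k = v = np.ones((1, 4, 8))    # (batch, chunk_length, feature) per chunk
mask = np.ones((1, 4, 4))         # per-chunk attention mask
chunks = [((q, k, v), mask)] * 3  # three identical chunks
out = ChunkedAttentionSelector(chunks, params=None)
print(out[2][0][1].shape)         # keys for chunk 2 now span chunks 1 and 2 -> (1, 8, 8)
print(out[2][1].shape)            # mask widened to match -> (1, 4, 8)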
def query_parameter(binding_key):
    pbk = ParsedBindingKey(binding_key)
    if pbk.config_key not in _CONFIG:
        err_str = "Configurable '{}' has no bound parameters."
        raise ValueError(err_str.format(pbk.given_selector))
    if pbk.arg_name not in _CONFIG[pbk.config_key]:
        err_str = "Configurable '{}' has no value bound for parameter '{}'."
        raise ValueError(err_str.format(pbk.given_selector, pbk.arg_name))
    return _CONFIG[pbk.config_key][pbk.arg_name]
Returns the currently bound value to the specified `binding_key`. The `binding_key` argument should look like 'maybe/some/scope/maybe.modules.configurable_name.parameter_name'. Note that this will not include default parameters. Args: binding_key: The parameter whose value should be set. Returns: The value bound to the configurable/parameter combination given in `binding_key`. Raises: ValueError: If no function can be found matching the configurable name specified by `binding_key`, or if the specified parameter name is blacklisted or not in the function's whitelist (if present) or if there is no value bound for the queried parameter or configurable.
codesearchnet
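A hedged sketch assuming this is gin-config's query helper; the configurable and the binding are made up for illustration:

import gin

@gin.configurable
def my_network(num_layers=2):
    return num_layers

gin.parse_config('my_network.num_layers = 4')        # bind a value for the parameter
print(query_parameter('my_network.num_layers'))      # -> 4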
def char_matches(s1, s2, n=3):
    return __matches(s1, s2, char_ngrams, n=n)
Character-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings
codesearchnet
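A quick usage sketch; the exact output depends on how the module's char_ngrams helper tokenizes, assumed here to produce contiguous character n-grams:

print(char_matches('banana', 'bandana'))       # e.g. {'ban', 'ana'}
print(char_matches('banana', 'bandana', n=2))  # the bigrams the two strings share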
def registration_backend(backend=None, namespace=None):
    backend = backend or ORGS_REGISTRATION_BACKEND
    class_module, class_name = backend.rsplit(".", 1)
    mod = import_module(class_module)
    return getattr(mod, class_name)(namespace=namespace)
Returns a specified registration backend Args: backend: dotted path to the registration backend class namespace: URL namespace to use Returns: an instance of a RegistrationBackend
juraj-google-style
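A hedged sketch of selecting a backend by dotted path; the path and namespace below are placeholders rather than guaranteed defaults:

backend = registration_backend(
    backend='organizations.backends.defaults.RegistrationBackend',  # hypothetical dotted path
    namespace='account')
# The returned instance is ready to hand its registration views/URLs to the project urlconf.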
def build(self):
    self._import_submodules()

    module_text_map = {}
    footer_text_map = {}
    for dest_module, dest_name_to_imports in self._module_imports.items():
        imports_list = [
            get_canonical_import(imports)
            for _, imports in dest_name_to_imports.items()
        ]
        if self._lazy_loading:
            module_text_map[dest_module] = _LAZY_LOADING_MODULE_TEXT_TEMPLATE % '\n'.join(sorted(imports_list))
        else:
            module_text_map[dest_module] = '\n'.join(sorted(imports_list))

    root_module_footer = ''
    if not self._lazy_loading:
        underscore_names_str = ', '.join(
            "'%s'" % name for name in sorted(self._underscore_names_in_root))
        root_module_footer = "\n_names_with_underscore = [%s]\n__all__ = [_s for _s in dir() if not _s.startswith('_')]\n__all__.extend([_s for _s in _names_with_underscore])\n" % underscore_names_str

    if self._api_version == 1 or self._lazy_loading:
        for dest_module, _ in self._module_imports.items():
            deprecation = 'False'
            has_lite = 'False'
            if self._api_version == 1:
                if not dest_module.startswith(_COMPAT_MODULE_PREFIX):
                    deprecation = 'True'
            if not dest_module and 'lite' in self._module_imports and self._lazy_loading:
                has_lite = 'True'
            if self._lazy_loading:
                public_apis_name = '_PUBLIC_APIS'
            else:
                public_apis_name = 'None'
            footer_text_map[dest_module] = _DEPRECATION_FOOTER % (
                dest_module, public_apis_name, deprecation, has_lite)

    return (module_text_map, footer_text_map, root_module_footer)
Get a map from destination module to __init__.py code for that module. Returns: A dictionary where key: (string) destination module (e.g. tf or tf.consts). value: (string) text that should be in __init__.py files for corresponding modules.
github-repos
def builtin_timescale(cls, name):
    names = {'isc': TIMESCALE__ISC,
             'usgs_isc': TIMESCALE__USGS_ISC,
             'dnag': TIMESCALE__DNAG}
    return cls.from_csv(text=names[name.lower()])
Generate a default timescale legend. No arguments. Returns: Legend: The timescale stored in `defaults.py`.
codesearchnet
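A one-line usage sketch, assuming this classmethod sits on striplog's Legend class; 'usgs_isc' and 'dnag' are the other accepted names:

timescale = Legend.builtin_timescale('isc')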