Dataset columns:
code: string (length 20 to 4.93k)
docstring: string (length 33 to 1.27k)
source: string (3 distinct values)
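Each record below is a (code, docstring, source) triple: a Python function, its documentation string, and the name of the originating collection. As a minimal, hypothetical sketch of how records with this schema could be loaded and inspected (the local file name and JSON-lines layout are assumptions, not taken from this dump):

    # Assumes the rows have been exported to a local JSON-lines file named
    # "code_docstring.jsonl" with the fields "code", "docstring" and "source".
    import json

    with open("code_docstring.jsonl", "r", encoding="utf-8") as handle:
        for line in handle:
            record = json.loads(line)
            # Each record pairs a function body with its docstring and a source label.
            print(record["source"], len(record["code"]), len(record["docstring"]))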
def get(self, personId):
    check_type(personId, basestring, may_be_none=False)
    json_data = self._session.get((API_ENDPOINT + '/') + personId)
    return self._object_factory(OBJECT_TYPE, json_data)
Get a person's details, by ID. Args: personId(basestring): The ID of the person to be retrieved. Returns: Person: A Person object with the details of the requested person. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def reformat_python_docstrings(top_dirs: List[str],
                               correct_copyright_lines: List[str],
                               show_only: bool = True,
                               rewrite: bool = False,
                               process_only_filenum: int = None) -> None:
    filenum = 0
    for top_dir in top_dirs:
        for (dirpath, dirnames, filenames) in walk(top_dir):
            for filename in filenames:
                fullname = join(dirpath, filename)
                extension = splitext(filename)[1]
                if extension != PYTHON_EXTENSION:
                    continue
                filenum += 1
                if process_only_filenum and (filenum != process_only_filenum):
                    continue
                log.info('Processing file {}: {}', filenum, fullname)
                proc = PythonProcessor(
                    full_path=fullname,
                    top_dir=top_dir,
                    correct_copyright_lines=correct_copyright_lines)
                if show_only:
                    proc.show()
                elif rewrite:
                    proc.rewrite_file()
Walk a directory, finding Python files and rewriting them. Args: top_dirs: list of directories to descend into correct_copyright_lines: list of lines (without newlines) representing the copyright docstring block, including the transition lines of equals symbols show_only: show results (to stdout) only; don't rewrite rewrite: write the changes process_only_filenum: only process this file number (1-based index); for debugging only
codesearchnet
def chained(self, text=None, fore=None, back=None, style=None):
    self.data = ''.join((
        self.data,
        self.color(text=text, fore=fore, back=back, style=style),
    ))
    return self
Called by the various 'color' methods to colorize a single string. The RESET_ALL code is appended to the string unless text is empty. Raises ValueError on invalid color names. Arguments: text : String to colorize, or None for BG/Style change. fore : Name of fore color to use. back : Name of back color to use. style : Name of style to use.
juraj-google-style
def get_nltk_builder(languages): all_stemmers = [] all_stopwords_filters = [] all_word_characters = set() for language in languages: if language == "en": all_stemmers.append(lunr.stemmer.stemmer) all_stopwords_filters.append(stop_word_filter) all_word_characters.update({r"\w"}) else: stopwords, word_characters = _get_stopwords_and_word_characters(language) all_stemmers.append( Pipeline.registered_functions["stemmer-{}".format(language)] ) all_stopwords_filters.append( generate_stop_word_filter(stopwords, language=language) ) all_word_characters.update(word_characters) builder = Builder() multi_trimmer = generate_trimmer("".join(sorted(all_word_characters))) Pipeline.register_function( multi_trimmer, "lunr-multi-trimmer-{}".format("-".join(languages)) ) builder.pipeline.reset() for fn in chain([multi_trimmer], all_stopwords_filters, all_stemmers): builder.pipeline.add(fn) for fn in all_stemmers: builder.search_pipeline.add(fn) return builder
Returns a builder with stemmers for all languages added to it. Args: languages (list): A list of supported languages.
juraj-google-style
def getModPath(self, *paths):
    dirn = self.getModDir()
    return s_common.genpath(dirn, *paths)
Construct a path relative to this module's working directory. Args: *paths: A list of path strings Notes: This creates the module specific directory if it does not exist. Returns: (str): The full path (or None if no cortex dir is configured).
juraj-google-style
def _randomize_direction(base_heading, sigma) -> int:
    val = MissionWeather._gauss(base_heading, sigma)
    val = MissionWeather._normalize_direction(val)
    return val
Creates a variation in direction Args: base_heading: base direction sigma: sigma value for gaussian variation Returns: random direction
codesearchnet
def get_slot(self, *args, **kwargs):
    return self._opt.get_slot(*args, **kwargs)
Return a slot named "name" created for "var" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The `Variable` for the slot if it was created, `None` otherwise.
github-repos
def verify(self, obj):
    if obj != self._literal:
        raise ValidationError(
            "Object is not equal to literal",
            reason='%s is not equal to %s' % (str(obj), str(self._literal)),
            object=obj)
    return obj
Verify that the object conforms to this verifier's schema Args: obj (object): A python object to verify Raises: ValidationError: If there is a problem verifying the dictionary, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
def model_from_json(json_string, custom_objects=None):
    from keras.src.saving import serialization_lib
    model_config = json.loads(json_string)
    return serialization_lib.deserialize_keras_object(
        model_config, custom_objects=custom_objects)
Parses a JSON model configuration string and returns a model instance. Example: >>> model = keras.Sequential([ ... keras.layers.Dense(5, input_shape=(3,)), ... keras.layers.Softmax()]) >>> config = model.to_json() >>> loaded_model = keras.models.model_from_json(config) Args: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled).
github-repos
def dump_orm_object_as_insert_sql(engine: Engine, obj: object, fileobj: TextIO) -> None: insp = inspect(obj) meta = MetaData(bind=engine) table_name = insp.mapper.mapped_table.name table = Table(table_name, meta, autoload=True) query = select(table.columns) for orm_pkcol in insp.mapper.primary_key: core_pkcol = table.columns.get(orm_pkcol.name) pkval = getattr(obj, orm_pkcol.name) query = query.where((core_pkcol == pkval)) cursor = engine.execute(query) row = cursor.fetchone() row_dict = dict(row) statement = table.insert(values=row_dict) insert_str = get_literal_query(statement, bind=engine) writeline_nl(fileobj, insert_str)
Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it to the output file-like object. Args: engine: SQLAlchemy :class:`Engine` obj: SQLAlchemy ORM object to write fileobj: file-like object to write to
codesearchnet
def from_voigt(cls, voigt_input):
    voigt_input = np.array(voigt_input)
    # The rank expression was truncated in the flattened sample; a Voigt array of
    # shape (6,)*n corresponds to a rank 2*n tensor, i.e. sum(shape) // 3.
    rank = sum(voigt_input.shape) // 3
    t = cls(np.zeros([3] * rank))
    if voigt_input.shape != t._vscale.shape:
        raise ValueError('Invalid shape for voigt matrix')
    voigt_input = voigt_input / t._vscale
    this_voigt_map = t.get_voigt_dict(rank)
    for ind in this_voigt_map:
        t[ind] = voigt_input[this_voigt_map[ind]]
    return cls(t)
Constructor based on the voigt notation vector or matrix. Args: voigt_input (array-like): voigt input for a given tensor
codesearchnet
def _client_send(self, msg):
    try:
        self._client.write(msg.encode("utf8") + b'\n')
        self._client.flush()
        self.log.debug('Snippet sent %s.', msg)
    except socket.error as e:
        raise Error(
            self._ad,
            'Encountered socket error "%s" sending RPC message "%s"' % (e, msg))
Sends an Rpc message through the connection. Args: msg: string, the message to send. Raises: Error: a socket error occurred during the send.
juraj-google-style
def FindCoinsByVins(self, vins):
    ret = []
    for coin in self.GetCoins():
        coinref = coin.Reference
        for vin in vins:
            if (coinref.PrevIndex == vin.PrevIndex) and (coinref.PrevHash == vin.PrevHash):
                ret.append(coin)
    return ret
Looks through the current collection of coins in a wallet and chooses coins that match the specified CoinReference objects. Args: vins: A list of ``neo.Core.CoinReference`` objects. Returns: list: A list of ``neo.Wallet.Coin`` objects.
codesearchnet
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool:
    try:
        next(self.match(subject))
    except StopIteration:
        return False
    return True
Check if the given subject matches any pattern in the net. Args: subject: The subject that is matched. Must be constant. Returns: True, if any pattern matches the subject.
codesearchnet
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int,
                                    num_training_steps: int, num_cycles: float = 0.5,
                                    last_epoch: int = -1):
    lr_lambda = partial(
        _get_cosine_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)
Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`float`, *optional*, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
github-repos
def json_compat_obj_encode(data_type, obj, caller_permissions=None,
                           alias_validators=None, old_style=False,
                           for_msgpack=False, should_redact=False):
    serializer = StoneToPythonPrimitiveSerializer(
        caller_permissions, alias_validators, for_msgpack, old_style, should_redact)
    return serializer.encode(data_type, obj)
Encodes an object into a JSON-compatible dict based on its type. Args: data_type (Validator): Validator for obj. obj (object): Object to be serialized. caller_permissions (list): The list of raw-string caller permissions with which to serialize. Returns: An object that when passed to json.dumps() will produce a string giving the JSON-encoded object. See json_encode() for additional information about validation.
juraj-google-style
def Run(self, request, global_params=None):
    config = self.GetMethodConfig('Run')
    return self._RunMethod(config, request, global_params=global_params)
Runs a `BuildTrigger` at a particular source revision. Args: request: (CloudbuildProjectsLocationsTriggersRunRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def call(self, batch_size: Optional[int], input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], input_labels: tf.Tensor | None, input_boxes: tf.Tensor | None, input_masks: tf.Tensor | None) -> Tuple[tf.Tensor, tf.Tensor]: sparse_embeddings = None if input_points is not None: batch_size, point_batch_size = shape_list(input_points)[:2] if input_labels is None: raise ValueError('If points are provided, labels must also be provided.') point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None) sparse_embeddings = tf.zeros((batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype) sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2) if input_boxes is not None: batch_size = shape_list(input_boxes)[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed[0] dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1)) dense_embeddings = tf.tile(dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1])) if sparse_embeddings is None: sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype) return (sparse_embeddings, dense_embeddings)
Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`tf.Tensor`, *optional*): point coordinates and labels to embed. boxes (`tf.Tensor`, *optional*): boxes to embed masks (`tf.Tensor`, *optional*): masks to embed
github-repos
def get(self, container_id):
    resp = self.client.api.inspect_container(container_id)
    return self.prepare_model(resp)
Get a container by name or ID. Args: container_id (str): Container name or ID. Returns: A :py:class:`Container` object. Raises: :py:class:`docker.errors.NotFound` If the container does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def padFrameRange(frange, zfill):
    def _do_pad(match):
        result = list(match.groups())
        result[1] = pad(result[1], zfill)
        if result[4]:
            result[4] = pad(result[4], zfill)
        return ''.join(i for i in result if i)
    return PAD_RE.sub(_do_pad, frange)
Return the zero-padded version of the frame range string. Args: frange (str): a frame range to test zfill (int): Returns: str:
juraj-google-style
def _FormatForCommand(token):
    if not isinstance(token, str):
        token = str(token)
    if token.startswith('_'):
        return token
    return token.replace('_', '-')
Replaces underscores with hyphens, unless the token starts with an underscore. This is because we typically prefer hyphens to underscores at the command line, but we reserve hyphens at the start of a token for flags. This becomes relevant when --verbose is activated, so that things like __str__ don't get transformed into --str--, which would get confused for a flag. Args: token: The token to transform. Returns: The transformed token.
github-repos
def request(self, send_terminator=False):
    try:
        retA = self.requestA()
        retB = self.requestB()
        if retA and retB:
            self.makeAB()
            self.calculateFields()
            self.updateObservers()
            return True
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
    return False
Combined A and B read for V4 meter. Args: send_terminator (bool): Send termination string at end of read. Returns: bool: True on completion.
juraj-google-style
def file_modify(filename, settings):
    for (k, v) in settings.items():
        if k == 'mode':
            os.chmod(filename, v)
        if k == 'owners':
            os.chown(filename, v)
Modifies file access Args: filename (str): Filename. settings (dict): Can be "mode" or "owners"
codesearchnet
def Mint(self, wallet, mint_to_addr, attachment_args, invoke_attrs=None):
    invoke_args = [self.ScriptHash.ToString(), 'mintTokens', []]
    invoke_args = invoke_args + attachment_args
    (tx, fee, results, num_ops, engine_success) = TestInvokeContract(
        wallet, invoke_args, None, True,
        from_addr=mint_to_addr, invoke_attrs=invoke_attrs)
    return (tx, fee, results)
Call the "mintTokens" function of the smart contract. Args: wallet (neo.Wallets.Wallet): a wallet instance. mint_to_addr (str): public address of the account to mint the tokens to. attachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3'] invoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results.
codesearchnet
def raster_dilation(rasterfile): if is_string(rasterfile): origin_raster = RasterUtilClass.read_raster(str(rasterfile)) elif isinstance(rasterfile, Raster): origin_raster = rasterfile.data elif isinstance(rasterfile, numpy.ndarray): origin_raster = rasterfile else: return 'Your rasterfile has a wrong type. Type must be string or numpy.array or class Raster in pygeoc.' min_value_raster = origin_raster.min() dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1])) add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster) temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row)) add_col = numpy.full(((origin_raster.shape[0] + 2), 1), min_value_raster) expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col)) for i in range(origin_raster.shape[0]): for j in range(origin_raster.shape[1]): max_pixel_value = min_value_raster for k in range(3): for l in range(3): if (expand_origin_raster[((i + k), (j + l))] >= max_pixel_value): max_pixel_value = expand_origin_raster[((i + k), (j + l))] dilation_raster[(i, j)] = max_pixel_value return dilation_raster
Dilate the raster image. Find the max pixel's value in 8-neighborhood. Then change the compute pixel's value into the max pixel's value. Args: rasterfile: input original raster image, type can be filename(string, like "test1.tif"), rasterfile(class Raster) or numpy.ndarray. Returns: dilation_raster: raster image after dilation, type is numpy.ndarray.
codesearchnet
def dom_processing(self, value):
    if value == self._defaults['domProcessing'] and 'domProcessing' in self._values:
        del self._values['domProcessing']
    else:
        self._values['domProcessing'] = value
The dom_processing property. Args: value (string). the property value.
juraj-google-style
def __init__(self, app):
    self.app = app
    self.user_manager = app.user_manager
    self.password_crypt_context = CryptContext(
        schemes=self.user_manager.USER_PASSLIB_CRYPTCONTEXT_SCHEMES,
        **self.user_manager.USER_PASSLIB_CRYPTCONTEXT_KEYWORDS)
Create a passlib CryptContext. Args: password_hash(str): The name of a valid passlib password hash. Examples: ``'bcrypt', 'pbkdf2_sha512', 'sha512_crypt' or 'argon2'``. Example: ``password_manager = PasswordManager('bcrypt')``
juraj-google-style
def picture_view(request, user_id, year=None): try: user = User.objects.get(id=user_id) except User.DoesNotExist: raise Http404 default_image_path = os.path.join(settings.PROJECT_ROOT, 'static/img/default_profile_pic.png') if (user is None): raise Http404 else: if (year is None): preferred = user.preferred_photo if (preferred is None): data = user.default_photo if (data is None): image_buffer = io.open(default_image_path, mode='rb') else: image_buffer = io.BytesIO(data) else: data = preferred.binary if data: image_buffer = io.BytesIO(data) else: image_buffer = io.open(default_image_path, mode='rb') else: grade_number = Grade.number_from_name(year) if user.photos.filter(grade_number=grade_number).exists(): data = user.photos.filter(grade_number=grade_number).first().binary else: data = None if data: image_buffer = io.BytesIO(data) else: image_buffer = io.open(default_image_path, mode='rb') response = HttpResponse(content_type='image/jpeg') response['Content-Disposition'] = 'filename={}_{}.jpg'.format(user_id, (year or preferred)) try: img = image_buffer.read() except UnicodeDecodeError: img = io.open(default_image_path, mode='rb').read() image_buffer.close() response.write(img) return response
Displays a view of a user's picture. Args: user_id The ID of the user whose picture is being fetched. year The user's picture from this year is fetched. If not specified, use the preferred picture.
codesearchnet
def call_later(self, delay, callback):
    if hasattr(self._connection.ioloop, "call_later"):
        self._connection.ioloop.call_later(delay, callback)
    else:
        self._connection.ioloop.add_timeout(delay, callback)
Schedule a one-shot timeout given delay seconds. This method is only useful for compatibility with older versions of pika. Args: delay (float): Non-negative number of seconds from now until expiration callback (method): The callback method, having the signature `callback()`
juraj-google-style
def collect(val, collections, default_collections):
    if collections is None:
        collections = default_collections
    for key in collections:
        ops.add_to_collection(key, val)
Adds keys to a collection. Args: val: The value to add per each key. collections: A collection of keys to add. default_collections: Used if collections is None.
github-repos
def __init__(self, user_assist_guid):
    key_path = self._KEY_PATH_FORMAT.format(user_assist_guid)
    super(UserAssistWindowsRegistryKeyPathFilter, self).__init__(key_path)
Initializes Windows Registry key filter. Args: user_assist_guid (str): UserAssist GUID.
juraj-google-style
def _GetTableNames(self, database):
    table_names = []
    for esedb_table in database.tables:
        table_names.append(esedb_table.name)
    return table_names
Retrieves the table names in a database. Args: database (pyesedb.file): ESE database. Returns: list[str]: table names.
codesearchnet
def filter_lines(lines, filter_regex, groups=None):
    pattern = re.compile(filter_regex)
    for line in lines:
        match = pattern.search(line)
        if match:
            if groups is None:
                yield line
            elif len(groups) == 1:
                yield match.group(groups[0])
            else:
                matched_groups = match.groupdict()
                yield tuple(matched_groups.get(group) for group in groups)
Filters out the lines not matching the pattern. Args: lines: list[string]: lines to filter. pattern: string: regular expression to filter out lines. Returns: list[string]: the list of filtered lines.
juraj-google-style
def main(params=None):
    parser = getParser()
    if params != None:
        args = parser.parse_args(params)
    else:
        args = parser.parse_args()
    print(general.title(banner.text))
    # Note: the greeting text that originally preceded the licence URL appears to
    # have been lost when this sample was flattened; only the URL part survives.
    sayingHello = general.LICENSE_URL + "\n"
    print(general.info(sayingHello))
    urlDict = {}
    if args.url != None:
        urlDict[str(args.url)] = None
    elif args.platforms != None:
        for p in args.platforms:
            with open(args.config, "r") as iF:
                lines = iF.read().splitlines()
                for l in lines:
                    platform = l.split('\t')[0]
                    url = l.split('\t')[1]
                    notFound = l.split('\t')[2]
                    if p == platform:
                        urlDict[url] = notFound
    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)
    enumerateURL(urlDict, args.output_folder,
                 startIndex=args.start_index, maxErrors=args.max_errors)
Main loop for the enumeration Args: ----- params: A list with the parameters as grabbed by the terminal. It is None when this is called by an entry_point.
juraj-google-style
def _generate_key_map(entity_list, key, entity_class):
    key_map = {}
    for obj in entity_list:
        key_map[obj[key]] = entity_class(**obj)
    return key_map
Helper method to generate map from key to entity object for given list of dicts. Args: entity_list: List consisting of dict. key: Key in each dict which will be key in the map. entity_class: Class representing the entity. Returns: Map mapping key to entity object.
juraj-google-style
def type(self, value):
    if value == self._defaults['type'] and 'type' in self._values:
        del self._values['type']
    else:
        self._values['type'] = value
The type property. Args: value (string). the property value.
juraj-google-style
def register(self, name): def decorator(func): 'Inner decorator, not used directly.\n\n Args:\n func: obj. Parameterless function to register.\n\n Returns:\n func: decorated function.\n ' self.logic[name] = func @wraps(func) def wrapper(): 'Wrapper, not used directly.' raise RuntimeError('working outside of request context') return wrapper return decorator
Decorator for registering a named function in the sesion logic. Args: name: str. Function name. func: obj. Parameterless function to register. The following named functions must be registered: 'LaunchRequest' - logic for launch request. 'SessionEndedRequest': logic for session ended request. In addition, all intents must be registered by their names specified in the intent schema. The aliased decorators: @launch, @intent(name), and @session_ended exist as a convenience for registering specific functions.
codesearchnet
def stop_standing_subprocess(proc): import psutil pid = proc.pid logging.debug('Stopping standing subprocess %d', pid) process = psutil.Process(pid) failed = [] try: children = process.children(recursive=True) except AttributeError: children = process.get_children(recursive=True) for child in children: try: child.kill() child.wait(timeout=10) except psutil.NoSuchProcess: pass except: failed.append(child.pid) logging.exception('Failed to kill standing subprocess %d', child.pid) try: process.kill() process.wait(timeout=10) except psutil.NoSuchProcess: pass except: failed.append(pid) logging.exception('Failed to kill standing subprocess %d', pid) if failed: raise Error(('Failed to kill standing subprocesses: %s' % failed)) if proc.stdout: proc.stdout.close() if proc.stderr: proc.stderr.close() proc.wait() logging.debug('Stopped standing subprocess %d', pid)
Stops a subprocess started by start_standing_subprocess. Before killing the process, we check if the process is running, if it has terminated, Error is raised. Catches and ignores the PermissionError which only happens on Macs. Args: proc: Subprocess to terminate. Raises: Error: if the subprocess could not be stopped.
codesearchnet
def order_verification(self, institute, case, user, link, variant): LOG.info("Creating event for ordering validation for variant" \ " {0}".format(variant['display_name'])) updated_variant = self.variant_collection.find_one_and_update( {'_id': variant['_id']}, {'$set': {'sanger_ordered': True}}, return_document=pymongo.ReturnDocument.AFTER ) self.create_event( institute=institute, case=case, user=user, link=link, category='variant', verb='sanger', variant=variant, subject=variant['display_name'], ) LOG.info("Creating event for ordering sanger for case" \ " {0}".format(case['display_name'])) self.create_event( institute=institute, case=case, user=user, link=link, category='case', verb='sanger', variant=variant, subject=variant['display_name'], ) return updated_variant
Create an event for a variant verification for a variant and an event for a variant verification for a case Arguments: institute (dict): A Institute object case (dict): Case object user (dict): A User object link (str): The url to be used in the event variant (dict): A variant object Returns: updated_variant(dict)
juraj-google-style
def scalar_spec(value_spec: pg.typing.ValueSpec) -> pg.typing.ValueSpec:
    return pg.typing.Union([
        value_spec,
        pg.typing.Callable([pg.typing.Int()], returns=value_spec),
    ])
Returns the value spec for a schedule scalar. Args: value_spec: a value spec for the schedule-based scalar type. Returns: A value spec for either the value itself or a callable that produces such value based on a step (integer).
github-repos
def fermi_fourier_trans_inverse_4(qubits):
    yield (fswap(qubits[1], qubits[2]),)
    yield fermi_fourier_trans_2(qubits[0], qubits[1])
    yield fermi_fourier_trans_2(qubits[2], qubits[3])
    yield fswap(qubits[1], qubits[2])
    yield fermi_fourier_trans_2(qubits[0], qubits[1])
    yield cirq.S(qubits[2])
    yield fermi_fourier_trans_2(qubits[2], qubits[3])
    yield fswap(qubits[1], qubits[2])
The reverse fermionic Fourier transformation implemented on 4 qubits on a line, which maps the momentum picture to the position picture. Using the fast Fourier transformation algorithm, the circuit can be decomposed into 2-mode fermionic Fourier transformation, the fermionic SWAP gates, and single-qubit rotations. Args: qubits: list of four qubits
codesearchnet
def load_tiff_multipage(tiff_filename, dtype='float32'): if not os.path.isfile(tiff_filename): raise RuntimeError('could not find file "%s"' % tiff_filename) data = tiff.imread(tiff_filename) im = [] while True: Xi = numpy.array(data, dtype=dtype) if Xi.ndim == 2: Xi = Xi[numpy.newaxis, ...] im.append(Xi) try: data.seek(data.tell()+1) except EOFError: break im = numpy.concatenate(im, axis=0) im = numpy.rollaxis(im, 1) im = numpy.rollaxis(im, 2) return im
Load a multipage tiff into a single variable in x,y,z format. Arguments: tiff_filename: Filename of source data dtype: data type to use for the returned tensor Returns: Array containing contents from input tiff file in xyz order
juraj-google-style
def read(self, queue, name=None):
    if isinstance(queue, tensor_lib.Tensor):
        queue_ref = queue
    else:
        queue_ref = queue.queue_ref
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name)
    else:
        old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)
        return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name)
Returns the next record (key, value) pair produced by a reader. Will dequeue a work unit from queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. name: A name for the operation (optional). Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor.
github-repos
def upload_file(filename: str, config: Config, full_table_id: str, action: Action=Action.APPEND, service_account_email: Optional[str]=None) -> None: if service_account_email: auth_config = AuthConfig(service_account_email=service_account_email) else: auth_config = None table_metadata = build_table_metadata(full_table_id) credentials = get_credentials(auth_config) bq_legacy_client = get_bq_legacy_client(table_metadata.project_id, credentials) table_exists = get_table(bq_legacy_client, table_metadata) is not None if table_exists and action == Action.REPLACE: bq_legacy_client.delete_table(table_metadata.full_table_id) table_exists = False if not table_exists: schema = generate_bigquery_schema(config) create_table(bq_legacy_client, table_metadata, schema) row_count = get_csv_row_count(filename) columns = [column.bq_name for column in config] with open(filename, 'r') as f: csv = DictReader(f, fieldnames=columns) buffer_: List[Row] = [] buffer_size = row_count buffer = Buffer[Row](buffer_, buffer_size, lambda rows: upload_rows(bq_legacy_client, table_metadata, rows)) for row in csv: buffer.push(row) buffer.flush(force=True)
Upload a data file conforming to the given config to BigQuery. Args: * filename: Local path to csv file to be uploaded * config: Config key from configs.CONFIGS dictionary * full_table_id: BigQuery table id * action: APPEND to table or REPLACE table * service_account_email: Email address of service account
github-repos
def register_tensor_conversion_function_internal(base_type, conversion_func, priority=100):
    base_types = base_type if isinstance(base_type, tuple) else (base_type,)
    if any(not isinstance(x, type) for x in base_types):
        raise TypeError(
            f'Argument `base_type` must be a type or a tuple of types. Obtained: {base_type}')
    del base_types
    if not callable(conversion_func):
        raise TypeError(
            f'Argument `conversion_func` must be callable. Received {conversion_func}.')
    with _tensor_conversion_func_lock:
        _tensor_conversion_func_registry[priority].append((base_type, conversion_func))
        _tensor_conversion_func_cache.clear()
Internal version of register_tensor_conversion_function. See docstring of `register_tensor_conversion_function` for details. The internal version of the function allows registering conversions for types in the _UNCONVERTIBLE_TYPES tuple. Args: base_type: The base type or tuple of base types for all objects that `conversion_func` accepts. conversion_func: A function that converts instances of `base_type` to `Tensor`. priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type.
github-repos
def LockRetryWrapper(self, subject, retrywrap_timeout=1, retrywrap_max_timeout=10,
                     blocking=True, lease_time=None):
    timeout = 0
    while timeout < retrywrap_max_timeout:
        try:
            return self.DBSubjectLock(subject, lease_time=lease_time)
        except DBSubjectLockError:
            if not blocking:
                raise
            stats_collector_instance.Get().IncrementCounter("datastore_retries")
            time.sleep(retrywrap_timeout)
            timeout += retrywrap_timeout
    raise DBSubjectLockError("Retry number exceeded.")
Retry a DBSubjectLock until it succeeds. Args: subject: The subject which the lock applies to. retrywrap_timeout: How long to wait before retrying the lock. retrywrap_max_timeout: The maximum time to wait for a retry until we raise. blocking: If False, raise on first lock failure. lease_time: lock lease time in seconds. Returns: The DBSubjectLock object Raises: DBSubjectLockError: If the maximum retry count has been reached.
juraj-google-style
def run_user_main(wrapped_test_module):
    tree = ast.parse(tf_inspect.getsource(wrapped_test_module))
    target = ast.dump(ast.parse('if __name__ == "__main__": pass').body[0].test)
    for expr in reversed(tree.body):
        if isinstance(expr, ast.If) and ast.dump(expr.test) == target:
            break
    else:
        raise NotImplementedError(
            f'Could not find `if __name__ == "main":` block in {wrapped_test_module.__name__}.')
    new_ast = ast.Module(body=expr.body, type_ignores=[])
    exec(compile(new_ast, '<ast>', 'exec'), globals(), wrapped_test_module.__dict__)
Runs the "if __name__ == '__main__'" at the bottom of a module. TensorFlow practice is to have a main if at the bottom of the module which might call an API compat function before calling test.main(). Since this is a statement, not a function, we can't cleanly reference it, but we can inspect it from the user module and run it in the context of that module so all imports and variables are available to it. Args: wrapped_test_module: The user-provided test code to run. Raises: NotImplementedError: If main block was not found in module. This should not be caught, as it is likely an error on the user's part -- absltest is all too happy to report a successful status (and zero tests executed) if a user forgets to end a class with "test.main()".
github-repos
def get_reference_points(spatial_shapes, valid_ratios, device): reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing='ij') ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points
Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
github-repos
def get(self):
    if self.call_queue:
        return self.apply(lambda df: df).data
    else:
        return self.data.copy()
Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`.
codesearchnet
def wulff_from_chempot(self, delu_dict=None, delu_default=0, symprec=1e-05,
                       no_clean=False, no_doped=False):
    latt = SpacegroupAnalyzer(self.ucell_entry.structure).get_conventional_standard_structure().lattice
    miller_list = self.all_slab_entries.keys()
    e_surf_list = []
    for hkl in miller_list:
        gamma = self.get_stable_entry_at_u(hkl, delu_dict=delu_dict,
                                           delu_default=delu_default,
                                           no_clean=no_clean,
                                           no_doped=no_doped)[1]
        e_surf_list.append(gamma)
    return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)
Method to get the Wulff shape at a specific chemical potential. Args: delu_dict (Dict): Dictionary of the chemical potentials to be set as constant. Note the key should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element. delu_default (float): Default value for all unset chemical potentials symprec (float): See WulffShape. no_doped (bool): Consider stability of clean slabs only. no_clean (bool): Consider stability of doped slabs only. Returns: (WulffShape): The WulffShape at u_ref and u_ads.
codesearchnet
def destroy_walker(self, walker):
    if walker.buffered:
        self._queue_walkers.remove(walker)
    else:
        self._virtual_walkers.remove(walker)
Destroy a previously created stream walker. Args: walker (StreamWalker): The walker to remove from internal updating lists.
juraj-google-style
def get_service_credentials(pipeline_options):
    return _Credentials.get_service_credentials(pipeline_options)
For internal use only; no backwards-compatibility guarantees. Get credentials to access Google services. Args: pipeline_options: Pipeline options, used in creating credentials like impersonated credentials. Returns: A ``_ApitoolsCredentialsAdapter`` object or None if credentials not found. Returned object is thread-safe.
github-repos
def __init__(self, devpath):
    self._fd = None
    self._devpath = None
    self._open(devpath)
Instantiate an I2C object and open the i2c-dev device at the specified path. Args: devpath (str): i2c-dev device path. Returns: I2C: I2C object. Raises: I2CError: if an I/O or OS error occurs.
juraj-google-style
def is_valid_package_name(name, raise_error=False):
    is_valid = PACKAGE_NAME_REGEX.match(name)
    if raise_error and (not is_valid):
        raise PackageRequestError('Not a valid package name: %r' % name)
    return is_valid
Test the validity of a package name string. Args: name (str): Name to test. raise_error (bool): If True, raise an exception on failure Returns: bool.
codesearchnet
def process_event(event):
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()
    print(event)
    if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and
            event.args and not event.args['with_follow_on_turn']):
        print()
    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in event.actions:
            print('Do command', command, 'with params', str(params))
Pretty prints events. Prints all events that occur with two spaces between each new conversation and a single space between turns of a conversation. Args: event(event.Event): The current event to process.
juraj-google-style
def retrieve_template(self):
    links = self.retrieve_instance_links()
    self.log.debug('Links is \n%s', pformat(links))
    self.pipeline_config['instance_links'].update(links)
    jsondata = get_template(template_file='infrastructure/app_data.json.j2',
                            appinfo=self.appinfo,
                            pipeline_config=self.pipeline_config,
                            formats=self.generated,
                            run_as_user=DEFAULT_RUN_AS_USER)
    self.log.debug('jsondata is %s', pformat(jsondata))
    return jsondata
Sets the instance links with pipeline_configs and then renders template files Returns: jsondata: A json objects containing templates
codesearchnet
def value_report(self, address, zipcode, report_type="full", format_type="json"):
    query_params = {
        "report_type": report_type,
        "format": format_type,
        "address": address,
        "zipcode": zipcode
    }
    return self._api_client.fetch_synchronous("property/value_report", query_params)
Call the value_report component Value Report only supports a single address. Args: - address - zipcode Kwargs: - report_type - "full" or "summary". Default is "full". - format_type - "json", "pdf", "xlsx" or "all". Default is "json".
juraj-google-style
def get_enterprise_customer_or_404(enterprise_uuid):
    EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer')
    try:
        enterprise_uuid = UUID(enterprise_uuid)
        return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)
    except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):
        LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)
        raise Http404
Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404. Arguments: enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch. Returns: (EnterpriseCustomer): The EnterpriseCustomer given the UUID.
codesearchnet
def create(cls, application_namespace, application_data):
    namespace = ApplicationNamespace(application_namespace)
    data = ApplicationData(application_data)
    return ApplicationSpecificInformation(
        application_namespace=namespace,
        application_data=data)
Construct an ApplicationSpecificInformation object from provided data and namespace values. Args: application_namespace (str): The name of the application namespace. application_data (str): Application data related to the namespace. Returns: ApplicationSpecificInformation: The newly created set of application information. Example: >>> x = ApplicationSpecificInformation.create('namespace', 'data') >>> x.application_namespace.value 'namespace' >>> x.application_data.value 'data'
codesearchnet
def set_quickchart_resource(self, resource): if (isinstance(resource, int) and (not isinstance(resource, bool))): resource = self.get_resources()[resource] if (isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict)): res = resource.get('id') if (res is None): resource = resource['name'] else: resource = res elif (not isinstance(resource, str)): raise hdx.data.hdxobject.HDXError(('Resource id cannot be found in type %s!' % type(resource).__name__)) if (is_valid_uuid(resource) is True): search = 'id' else: search = 'name' changed = False for dataset_resource in self.resources: if (dataset_resource[search] == resource): dataset_resource.enable_dataset_preview() self.preview_resource() changed = True else: dataset_resource.disable_dataset_preview() return changed
Set the resource that will be used for displaying QuickCharts in dataset preview Args: resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position Returns: bool: Returns True if resource for QuickCharts in dataset preview set or False if not
codesearchnet
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None): out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') prob = nn.functional.softmax(out_logits, -1) scores, labels = prob[..., :-1].max(-1) boxes = center_to_corners_format(out_bbox) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results
Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.
github-repos
def setup(self, socket_type, complete_or_error_queue): try: if self._secured: if self._server_public_key is None or \ self._server_private_key is None: raise LocalConfigurationError( "Attempting to start socket in secure mode, " "but complete server keys were not provided") self._event_loop = zmq.asyncio.ZMQEventLoop() asyncio.set_event_loop(self._event_loop) self._context = zmq.asyncio.Context() self._socket = self._context.socket(socket_type) self._socket.set(zmq.TCP_KEEPALIVE, 1) self._socket.set(zmq.TCP_KEEPALIVE_IDLE, self._connection_timeout) self._socket.set(zmq.TCP_KEEPALIVE_INTVL, self._heartbeat_interval) if socket_type == zmq.DEALER: self._socket.identity = "{}-{}".format( self._zmq_identity, hashlib.sha512(uuid.uuid4().hex.encode() ).hexdigest()[:23]).encode('ascii') if self._secured: public_key, secretkey = zmq.curve_keypair() self._socket.curve_publickey = public_key self._socket.curve_secretkey = secretkey self._socket.curve_serverkey = self._server_public_key self._socket.connect(self._address) elif socket_type == zmq.ROUTER: if self._secured: auth = AsyncioAuthenticator(self._context) self._auth = auth auth.start() auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) self._socket.curve_secretkey = self._server_private_key self._socket.curve_publickey = self._server_public_key self._socket.curve_server = True try: self._socket.bind(self._address) except zmq.error.ZMQError as e: raise LocalConfigurationError( "Can't bind to {}: {}".format(self._address, str(e))) else: LOGGER.info("Listening on %s", self._address) self._dispatcher.add_send_message(self._connection, self.send_message) self._dispatcher.add_send_last_message(self._connection, self.send_last_message) asyncio.ensure_future(self._remove_expired_futures(), loop=self._event_loop) asyncio.ensure_future(self._receive_message(), loop=self._event_loop) asyncio.ensure_future(self._dispatch_message(), loop=self._event_loop) self._dispatcher_queue = asyncio.Queue() if self._monitor: self._monitor_fd = "inproc: _generate_id()[0:5]) self._monitor_sock = self._socket.get_monitor_socket( zmq.EVENT_DISCONNECTED, addr=self._monitor_fd) asyncio.ensure_future(self._monitor_disconnects(), loop=self._event_loop) except Exception as e: complete_or_error_queue.put_nowait(e) self._close_sockets() raise if self._heartbeat: asyncio.ensure_future(self._do_heartbeat(), loop=self._event_loop) complete_or_error_queue.put_nowait(_STARTUP_COMPLETE_SENTINEL) asyncio.ensure_future(self._notify_started(), loop=self._event_loop) self._event_loop.run_forever() self._event_loop.close() self._close_sockets()
Setup the asyncio event loop. Args: socket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER complete_or_error_queue (queue.Queue): A way to propagate errors back to the calling thread. Needed since this function is directly used in Thread. Returns: None
juraj-google-style
def enc(self, byts, asscd=None):
    iv = os.urandom(16)
    encryptor = AESGCM(self.ekey)
    byts = encryptor.encrypt(iv, byts, asscd)
    envl = {'iv': iv, 'data': byts, 'asscd': asscd}
    return s_msgpack.en(envl)
Encrypt the given bytes and return an envelope dict in msgpack form. Args: byts (bytes): The message to be encrypted. asscd (bytes): Extra data that needs to be authenticated (but not encrypted). Returns: bytes: The encrypted message. This is a msgpacked dictionary containing the IV, ciphertext, and associated data.
codesearchnet
def __init__(self, name: str, snap_type: str):
    self._type = snap_type
    self._channel = SnapshotChannel()
    Command.__init__(self, duration=0, name=name)
    Instruction.__init__(self, self, self._channel, name=name)
Create new snapshot command. Args: name (str): Snapshot name which is used to identify the snapshot in the output. snap_type (str): Type of snapshot, e.g., “state” (take a snapshot of the quantum state). The types of snapshots offered are defined in a separate specification document for simulators.
juraj-google-style
def step(self, actions): if self._store_rollouts and \ self._rollouts_by_epoch_and_split[self.current_epoch]: raise ValueError( "Data for current epoch has already been loaded from disk." ) (obs, unclipped_rewards, dones) = self._step(actions) obs = self._preprocess_observations(obs) (min_reward, max_reward) = self.reward_range rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward)) if self._store_rollouts: unclipped_rewards = unclipped_rewards.astype(np.float64) encoded_obs = self._encode_observations(obs) for (rollout, frame, action) in zip( self._current_batch_rollouts, self._current_batch_frames, actions ): rollout.append(frame._replace(action=action)) self._current_batch_frames = [ Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones) ] return (obs, rewards, dones)
Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded.
juraj-google-style
def forward(self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, output_attentions: Optional[bool]=None, attention_similarity: Optional[torch.Tensor]=None, target_embedding: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings.sum().item() != 0: tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) point_embedding, image_embeddings, attentions = self.transformer(point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1:1 + self.num_mask_tokens, :] image_embeddings = image_embeddings.transpose(2, 3).reshape(batch_size * point_batch_size, num_channels, height, width) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width) masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width) iou_pred = self.iou_prediction_head(iou_token_out) if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] outputs = (masks, iou_pred) if output_attentions: outputs = outputs + (attentions,) else: outputs = outputs + (None,) return outputs
Predict masks given image and prompt embeddings. Args: image_embeddings (`torch.Tensor`): the embeddings from the image encoder image_positional_embedding (`torch.Tensor`): positional encoding with the shape of image_embeddings sparse_prompt_embeddings (`torch.Tensor`): The embeddings of the points and boxes dense_prompt_embeddings (`torch.Tensor`): the embeddings of the mask inputs multimask_output (bool): Whether to return multiple masks or a single mask. output_attentions (bool, *optional*): Whether or not to return the attentions tensors of all attention layers.
github-repos
def _add_remove_team_member(self, url, email_address=None, account_id=None):
    if not email_address and not account_id:
        raise HSException("No email address or account_id specified")
    data = {}
    if account_id is not None:
        data = {
            "account_id": account_id
        }
    else:
        data = {
            "email_address": email_address
        }
    request = self._get_request()
    response = request.post(url, data)
    return response
Add or Remove a team member We use this function for two different tasks because they have the same API call Args: email_address (str): Email address of the Account to add/remove account_id (str): ID of the Account to add/remove Returns: A Team object
juraj-google-style
def _scope_vals(self, vals):
    if isinstance(vals, (list, tuple)):
        return vals
    elif isinstance(vals, dict):
        return vals.values()
    else:
        return [vals]
Return a list of values to pass to `name_scope()`. Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: The values in vals as a list.
github-repos
def _profile_table(self, batch_id): message = self._execute_command(batch_id, 'RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.PROFILE, '', batch_id.binary()) if (message is None): return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0) profile_events = [] for i in range(gcs_entries.EntriesLength()): profile_table_message = ray.gcs_utils.ProfileTableData.GetRootAsProfileTableData(gcs_entries.Entries(i), 0) component_type = decode(profile_table_message.ComponentType()) component_id = binary_to_hex(profile_table_message.ComponentId()) node_ip_address = decode(profile_table_message.NodeIpAddress(), allow_none=True) for j in range(profile_table_message.ProfileEventsLength()): profile_event_message = profile_table_message.ProfileEvents(j) profile_event = {'event_type': decode(profile_event_message.EventType()), 'component_id': component_id, 'node_ip_address': node_ip_address, 'component_type': component_type, 'start_time': profile_event_message.StartTime(), 'end_time': profile_event_message.EndTime(), 'extra_data': json.loads(decode(profile_event_message.ExtraData()))} profile_events.append(profile_event) return profile_events
Get the profile events for a given batch of profile events. Args: batch_id: An identifier for a batch of profile events. Returns: A list of the profile events for the specified batch.
codesearchnet
def _Open(self, path_spec=None, mode='rb'):
    if not self._file_object_set_in_init and not path_spec:
        raise ValueError('Missing path specification.')
    if not self._file_object_set_in_init:
        if not path_spec.HasParent():
            raise errors.PathSpecError(
                'Unsupported path specification without parent.')
        self._encryption_method = getattr(path_spec, 'encryption_method', None)
        if self._encryption_method is None:
            raise errors.PathSpecError(
                'Path specification missing encryption method.')
        self._file_object = resolver.Resolver.OpenFileObject(
            path_spec.parent, resolver_context=self._resolver_context)
        self._path_spec = path_spec
Opens the file-like object. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def search(self, search_space, valid_data, init_args=[], train_args=[], init_kwargs={}, train_kwargs={}, module_args={}, module_kwargs={}, max_search=None, shuffle=True, verbose=True, seed=None, **score_kwargs): self._clear_state(seed) self.search_space = search_space n_models_scored = 0 for (bracket_index, bracket) in enumerate(self.hyperband_schedule): (n_starting_configurations, _) = bracket[0] configurations = list(self.config_generator(search_space, max_search=n_starting_configurations, rng=self.rng, shuffle=True)) for (band_index, (n_i, r_i)) in enumerate(bracket): assert (len(configurations) <= n_i) scored_configurations = [] for (i, configuration) in enumerate(configurations): cur_model_index = n_models_scored configuration['n_epochs'] = r_i (score, model) = self._test_model_config(f'{band_index}_{i}', configuration, valid_data, init_args=init_args, train_args=train_args, init_kwargs=init_kwargs, train_kwargs=train_kwargs, module_args=module_args, module_kwargs=module_kwargs, verbose=verbose, **score_kwargs) scored_configurations.append((score, cur_model_index, configuration)) n_models_scored += 1 scored_configurations.sort(key=(lambda x: x[0]), reverse=True) if ((band_index + 1) < len(bracket)): (n_to_keep, _) = bracket[(band_index + 1)] configurations = [x[2] for x in scored_configurations][:n_to_keep] print(('=' * 60)) print(f'[SUMMARY]') print(f'Best model: [{self.best_index}]') print(f'Best config: {self.best_config}') print(f'Best score: {self.best_score}') print(('=' * 60)) return self._load_best_model(clean_up=True)
Performs hyperband search according to the generated schedule. At the beginning of each bracket, we generate a list of random configurations and perform successive halving on it; we repeat this process for the number of brackets in the schedule. Args: init_args: (list) positional args for initializing the model train_args: (list) positional args for training the model valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split search_space: see ModelTuner's config_generator() documentation max_search: see ModelTuner's config_generator() documentation shuffle: see ModelTuner's config_generator() documentation Returns: best_model: the highest performing trained model found by Hyperband best_config: (dict) the config corresponding to the best model Note: Initialization is performed by ModelTuner instead of passing a pre-initialized model so that tuning may be performed over all model parameters, including the network architecture (which is defined before the train loop).
codesearchnet
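To make the control flow of the search method above easier to follow, here is a self-contained toy of the successive-halving bookkeeping inside a single bracket. The bracket sizes, configurations and scores are all made up (random numbers stand in for the tuner's model scoring), so this only sketches the pruning logic, not the real tuner.

import random

# Toy bracket: (number of configurations, epochs per configuration) per rung.
bracket = [(9, 1), (3, 3), (1, 9)]
rng = random.Random(0)
configurations = [{'lr': rng.choice([1e-3, 1e-2, 1e-1]), 'l2': rng.random()}
                  for _ in range(bracket[0][0])]

for band_index, (n_i, r_i) in enumerate(bracket):
    # A random score stands in for training each configuration for r_i epochs.
    scored = sorted(((rng.random(), i, cfg) for i, cfg in enumerate(configurations)),
                    key=lambda x: x[0], reverse=True)
    if band_index + 1 < len(bracket):
        n_to_keep, _ = bracket[band_index + 1]
        configurations = [cfg for _, _, cfg in scored][:n_to_keep]

print(len(configurations))  # 1: only the best-scoring configuration survives the bracket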
def get_serialization_context(self, driver_id): with self.lock: if driver_id not in self.serialization_context_map: _initialize_serialization(driver_id) return self.serialization_context_map[driver_id]
Get the SerializationContext of the driver that this worker is processing. Args: driver_id: The ID of the driver that indicates which driver to get the serialization context for. Returns: The serialization context of the given driver.
juraj-google-style
def _VerifyExplicitPaddings(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations=(1, 1), test_grappler_layout_optimizer=False, tol=1e-05, fp16_tol=0.001): input_tensor = self._CreateNumpyTensor(tensor_in_sizes) filter_tensor = self._CreateNumpyTensor(filter_in_sizes) input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)]) dilations = list(dilations) conv2d_result = nn_ops.conv2d(input_tensor, filter_tensor, [1] + list(strides) + [1], 'VALID', dilations=[1] + dilations + [1]) expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1]))) self._VerifyValues(tensor_in_sizes, filter_in_sizes, strides, padding, expected, dilations, test_grappler_layout_optimizer=test_grappler_layout_optimizer, tol=tol, fp16_tol=fp16_tol)
Verifies Conv2D with explicit padding generates correct values. It does this by comparing with Conv2D without explicit padding. This function assumes Conv2D without explicit padding works correctly. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. strides: [row_stride, col_stride] for the convolution. padding: Explicit padding amounts. dilations: Dilation values. test_grappler_layout_optimizer: If True, allow the Grappler layout optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds. tol: The absolute and relative tolerance for non-fp16 dtypes. fp16_tol: The absolute and relative tolerance for fp16.
github-repos
def heightmap_normalize(hm: np.ndarray, mi: float=0.0, ma: float=1.0) -> None: lib.TCOD_heightmap_normalize(_heightmap_cdata(hm), mi, ma)
Normalize heightmap values between ``mi`` and ``ma``. Args: mi (float): The lowest value after normalization. ma (float): The highest value after normalization.
codesearchnet
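`heightmap_normalize` delegates to libtcod's C implementation, so as a rough, hedged illustration of what normalising into [mi, ma] means, here is a NumPy-only stand-in; the helper name and the handling of a perfectly flat heightmap are assumptions, not the library's documented behaviour.

import numpy as np

def heightmap_normalize_sketch(hm, mi=0.0, ma=1.0):
    # Hypothetical pure-NumPy equivalent: rescale hm in place so its values span [mi, ma].
    lo, hi = hm.min(), hm.max()
    if hi == lo:
        hm[...] = mi  # assumed behaviour for a flat heightmap
        return
    hm[...] = (hm - lo) / (hi - lo) * (ma - mi) + mi

hm = np.random.default_rng(0).random((4, 4)) * 100.0
heightmap_normalize_sketch(hm, mi=0.0, ma=1.0)
print(hm.min(), hm.max())  # 0.0 1.0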
def get_app_names(self): app_names = set() for name in self.apps: app_names.add(name) return app_names
Return application names. Return the set of application names that are available in the database. Returns: set of str.
codesearchnet
def doit(self, classes=None, recursive=True, indices=None, max_terms=None, **kwargs): return super().doit(classes, recursive, indices=indices, max_terms=max_terms, **kwargs)
Write out the indexed sum explicitly If `classes` is None or :class:`IndexedSum` is in `classes`, (partially) write out the indexed sum in to an explicit sum of terms. If `recursive` is True, write out each of the new sum's summands by calling its :meth:`doit` method. Args: classes (None or list): see :meth:`.Expression.doit` recursive (bool): see :meth:`.Expression.doit` indices (list): List of :class:`IdxSym` indices for which the sum should be expanded. If `indices` is a subset of the indices over which the sum runs, it will be partially expanded. If not given, expand the sum completely max_terms (int): Number of terms after which to truncate the sum. This is particularly useful for infinite sums. If not given, expand all terms of the sum. Cannot be combined with `indices` kwargs: keyword arguments for recursive calls to :meth:`doit`. See :meth:`.Expression.doit`
codesearchnet
def delete_issue(self, issue_id, params=None):
    return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params)
Deletes an individual issue. If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete an issue without deleting its sub-tasks. Args: issue_id: The ID of the issue to delete. params: Optional query parameters, e.g. deleteSubtasks=true. Returns: The response from the delete request.
codesearchnet
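A hypothetical usage sketch (the client object and issue key below are invented, not from the source); it only shows how the deleteSubtasks flag mentioned in the docstring travels through params.

# 'client' stands for an instance of the class defining delete_issue(); 'DEMO-123' is invented.
client.delete_issue('DEMO-123')                                      # fails if the issue has sub-tasks
client.delete_issue('DEMO-123', params={'deleteSubtasks': 'true'})   # removes the issue and its sub-tasks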
def get_details(app='groupproject', env='dev', region='us-east-1'): url = '{host}/applications/{app}'.format(host=API_URL, app=app) request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) if not request.ok: raise SpinnakerAppNotFound('"{0}" not found.'.format(app)) app_details = request.json() LOG.debug('App details: %s', app_details) group = app_details['attributes'].get('repoProjectKey') project = app_details['attributes'].get('repoSlug') generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS) LOG.debug('Application details: %s', generated) return generated
Extract details for Application. Args: app (str): Application Name env (str): Environment/account to get details from region (str): AWS Region to use, e.g. us-east-1 Returns: collections.namedtuple with _group_, _policy_, _profile_, _role_, _user_.
juraj-google-style
def _GetComparable(self, sub_comparable_string=''): string_parts = [] string_parts.append(getattr(self.parent, 'comparable', '')) string_parts.append('type: {0:s}'.format(self.type_indicator)) if sub_comparable_string: string_parts.append(', {0:s}'.format(sub_comparable_string)) string_parts.append('\n') return ''.join(string_parts)
Retrieves the comparable representation. This is a convenience function for constructing comparables. Args: sub_comparable_string (str): sub comparable string. Returns: str: comparable representation of the path specification.
juraj-google-style
def evaluate_cut(uncut_subsystem, cut, unpartitioned_ces): log.debug('Evaluating %s...', cut) cut_subsystem = uncut_subsystem.apply_cut(cut) if config.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS: mechanisms = unpartitioned_ces.mechanisms else: mechanisms = set( unpartitioned_ces.mechanisms + list(cut_subsystem.cut_mechanisms)) partitioned_ces = ces(cut_subsystem, mechanisms) log.debug('Finished evaluating %s.', cut) phi_ = ces_distance(unpartitioned_ces, partitioned_ces) return SystemIrreducibilityAnalysis( phi=phi_, ces=unpartitioned_ces, partitioned_ces=partitioned_ces, subsystem=uncut_subsystem, cut_subsystem=cut_subsystem)
Compute the system irreducibility for a given cut. Args: uncut_subsystem (Subsystem): The subsystem without the cut applied. cut (Cut): The cut to evaluate. unpartitioned_ces (CauseEffectStructure): The cause-effect structure of the uncut subsystem. Returns: SystemIrreducibilityAnalysis: The |SystemIrreducibilityAnalysis| for that cut.
juraj-google-style
def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed): feed_item = {'feedId': ad_customizer_feed['feedId'], 'attributeValues': [{'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'], 'stringValue': name}, {'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'], 'stringValue': price}, {'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'], 'stringValue': date}]} operation = {'operator': 'ADD', 'operand': feed_item} return operation
Creates a FeedItemOperation. The generated FeedItemOperation will create a FeedItem with the specified values when sent to FeedItemService.mutate. Args: name: the value for the name attribute of the FeedItem. price: the value for the price attribute of the FeedItem. date: the value for the date attribute of the FeedItem. ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems with. Returns: A new FeedItemOperation for adding a FeedItem.
codesearchnet
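Because the helper above only assembles plain dictionaries, it can be exercised without any AdWords client. The feed below is illustrative: made-up attribute IDs, trimmed to the keys the helper actually reads.

# Illustrative ad customizer feed with made-up IDs (name, price, date attributes in order).
ad_customizer_feed = {
    'feedId': '12345',
    'feedAttributes': [{'id': 1}, {'id': 2}, {'id': 3}],
}

operation = CreateFeedItemAddOperation('Mars Cruise', '$1,234.56', 'Dec 1, 2025',
                                       ad_customizer_feed)
print(operation['operator'])                       # ADD
print(operation['operand']['attributeValues'][0])  # {'feedAttributeId': 1, 'stringValue': 'Mars Cruise'}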
def get_site_t2g_eg_resolved_dos(self, site): t2g_dos = [] eg_dos = [] for s, atom_dos in self.pdos.items(): if s == site: for orb, pdos in atom_dos.items(): if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz): t2g_dos.append(pdos) elif orb in (Orbital.dx2, Orbital.dz2): eg_dos.append(pdos) return {"t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)), "e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos))}
Get the t2g, eg projected DOS for a particular site. Args: site: Site in Structure associated with CompleteDos. Returns: A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS for the site.
juraj-google-style
def sequence_like(instance, args): if _is_mutable_mapping(instance): result = dict(zip(_tf_core_sorted(instance), args)) instance_type = type(instance) if instance_type == _collections.defaultdict: d = _collections.defaultdict(instance.default_factory) else: d = instance_type() for key in instance: d[key] = result[key] return d elif _is_mapping(instance): result = dict(zip(_tf_core_sorted(instance), args)) instance_type = type(instance) if not getattr(instance_type, '__supported_by_tf_nest__', False): tf_logging.log_first_n(tf_logging.WARN, 'Mapping types may not work well with tf.nest. Prefer using MutableMapping for {}'.format(instance_type), 1) try: return instance_type(((key, result[key]) for key in instance)) except TypeError as err: raise TypeError('Error creating an object of type {} like {}. Note that it must accept a single positional argument representing an iterable of key-value pairs, in addition to self. Cause: {}'.format(type(instance), instance, err)) elif _is_mapping_view(instance): return list(args) elif is_namedtuple(instance) or _is_attrs(instance): if isinstance(instance, _wrapt.ObjectProxy): instance_type = type(instance.__wrapped__) else: instance_type = type(instance) return instance_type(*args) elif _is_composite_tensor(instance): assert len(args) == 1 spec = instance._type_spec return spec._from_components(args[0]) elif _is_type_spec(instance): assert len(args) == 1 return instance._from_components(args[0]) elif isinstance(instance, range): return sequence_like(list(instance), args) elif isinstance(instance, _wrapt.ObjectProxy): return type(instance)(sequence_like(instance.__wrapped__, args)) elif isinstance(instance, CustomNestProtocol): metadata = instance.__tf_flatten__()[0] return instance.__tf_unflatten__(metadata, tuple(args)) else: return type(instance)(args)
Converts the sequence `args` to the same type as `instance`. Args: instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, `collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or `type_spec.TypeSpec`. args: items to be converted to the `instance` type. Returns: `args` with the type of `instance`.
github-repos
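`sequence_like` lives inside TensorFlow's nest utilities and leans on module-private helpers, so the snippet below is a behavioural sketch rather than a standalone script: it assumes those helpers are importable alongside the function, and shows how flat components are matched back onto a namedtuple and onto a dict (the flat values correspond to keys in sorted order, while the rebuilt dict keeps the original insertion order).

import collections

Point = collections.namedtuple('Point', ['x', 'y'])

print(sequence_like(Point(0, 0), [1, 2]))         # Point(x=1, y=2)
print(sequence_like({'b': 0, 'a': 0}, [10, 20]))  # {'b': 20, 'a': 10}: 'a' gets 10, 'b' gets 20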
def request_stop(self): raise StopIteration('step_fn has requested the iterations to stop.')
Exit the training loop by causing `should_stop()` to return `True`. Causes `step_fn` to exit by raising an exception. Raises: StopIteration
github-repos
def _preprocess_resize_output_shape(image, output_shape): output_shape = tuple(output_shape) output_ndim = len(output_shape) input_shape = image.shape if output_ndim > image.ndim: input_shape += (1,) * (output_ndim - image.ndim) image = np.reshape(image, input_shape) elif output_ndim == image.ndim - 1: output_shape = output_shape + (image.shape[-1],) elif output_ndim < image.ndim: raise ValueError('output_shape length cannot be smaller than the image number of dimensions') return (image, output_shape)
Validate resize output shape according to input image. Args: image (`np.ndarray`): Image to be resized. output_shape (`iterable`): Size of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of channels is preserved. Returns: image (`np.ndarray`): The input image, with additional singleton dimensions appended when `len(output_shape) > image.ndim`. output_shape (`Tuple`): The output shape converted to tuple. Raises: ValueError: If output_shape length is smaller than the image number of dimensions. Notes: The input image is reshaped if its number of dimensions is not equal to the output shape length.
github-repos
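A small NumPy-only check of the reshaping rules described above, assuming the helper as defined; the shapes are arbitrary.

import numpy as np

# A 2-D grayscale image with a 3-D target shape gains a singleton channel axis.
image, shape = _preprocess_resize_output_shape(np.zeros((32, 32)), (16, 16, 1))
print(image.shape, shape)  # (32, 32, 1) (16, 16, 1)

# A channelled image with a 2-D target keeps its channel count in the output shape.
_, shape = _preprocess_resize_output_shape(np.zeros((32, 32, 3)), (16, 16))
print(shape)               # (16, 16, 3)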
def keypoint_flip(bbox, d, rows, cols):
    if d == 0:
        bbox = keypoint_vflip(bbox, rows, cols)
    elif d == 1:
        bbox = keypoint_hflip(bbox, rows, cols)
    elif d == -1:
        bbox = keypoint_hflip(bbox, rows, cols)
        bbox = keypoint_vflip(bbox, rows, cols)
    else:
        raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))
    return bbox
Flip a keypoint either vertically, horizontally or both depending on the value of `d`. Args: bbox: The keypoint to flip. d: Flip code: 0 flips vertically, 1 flips horizontally, -1 flips both. rows: Image height. cols: Image width. Raises: ValueError: if value of `d` is not -1, 0 or 1.
codesearchnet
def argv(cls, name, short_name=None, type=None, help=None): cls.__hierarchy.append(argv.Argv(name, short_name, type, help))
Set command line arguments as a source. Parses the command line arguments described by the parameters. Args: name: the long name of the argument (foo) short_name: the optional short name of the argument (f) type: the optional type of the argument, defaults to bool help: the optional help text for the argument
codesearchnet
def path_to_string(path): if isinstance(path, os.PathLike): return os.fspath(path) return path
Convert `PathLike` objects to their string representation. If given a non-string typed path object, converts it to its string representation. If the object passed to `path` is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: `PathLike` object that represents a path Returns: A string representation of the path argument, if Python support exists.
github-repos
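A quick standard-library-only demonstration, assuming the `path_to_string` helper above is in scope; the paths are arbitrary.

import os
import pathlib

print(path_to_string(pathlib.Path('/tmp') / 'model.keras'))  # '/tmp/model.keras'
print(path_to_string('weights.h5'))                          # plain strings pass through unchanged

# Non-path objects such as file handles are also returned untouched.
with open(os.devnull, 'w') as f:
    print(path_to_string(f) is f)                            # True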
def quarter_ellipsis_functions(xx, yy): npxx = np.array(xx) npyy = np.array(yy) if np.any((npxx == npyy)): raise RuntimeError('Invalid points for quarter_ellipsis_functions') if (np.all((npxx < npyy)) or np.all((npxx > npyy))): if (npxx[0] < npyy[0]): p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx c_lower = np.array([p1[0], p2[1]]) c_upper = np.array([p2[0], p1[1]]) b2 = ((p2[1] - p1[1]) ** 2) else: if (npxx[0] < npyy[0]): p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx c_lower = np.array([p2[0], p1[1]]) c_upper = np.array([p1[0], p2[1]]) b2 = ((p1[1] - p2[1]) ** 2) b2overa2 = (b2 / ((p2[0] - p1[0]) ** 2)) def lower(x): return (c_lower[1] - np.sqrt((b2 - (b2overa2 * ((x - c_lower[0]) ** 2))))) def upper(x): return (c_upper[1] + np.sqrt((b2 - (b2overa2 * ((x - c_upper[0]) ** 2))))) return {'lower': lower, 'upper': upper}
Method that creates two quarter-ellipse functions based on points xx and yy. The ellipse is assumed to be aligned with the axes. The two quarter ellipses pass through the two points xx and yy. Args: xx: First point yy: Second point Returns: A dictionary with the lower and upper quarter-ellipse functions.
codesearchnet
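A usage sketch of the helper above: two axis-aligned quarter ellipses through the points (0, 0) and (2, 1), checked at and between their endpoints (assuming the function as defined, with NumPy available to it).

funcs = quarter_ellipsis_functions([0.0, 0.0], [2.0, 1.0])
lower, upper = funcs['lower'], funcs['upper']

print(lower(0.0), lower(2.0))  # 0.0 1.0 -> the lower branch passes through both points
print(upper(0.0), upper(2.0))  # 0.0 1.0 -> so does the upper branch
print(lower(1.0), upper(1.0))  # ~0.134 ~0.866 -> lower bulges below the chord, upper above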
def _replace_oov(original_vocab, line):
    return u' '.join([word if word in original_vocab else u'UNK' for word in line.split()])
Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words.
codesearchnet
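A tiny usage check with a made-up vocabulary; only words present in it survive the mapping.

original_vocab = {u'the', u'cat', u'sat'}
print(_replace_oov(original_vocab, u'the cat sat on the mat'))
# -> 'the cat sat UNK the UNK'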
def GetRealPath(filename): if os.path.isabs(filename): return filename if filename.startswith('./') or filename.startswith('../'): return os.path.abspath(filename) path = os.getenv('PATH', '') for directory in path.split(':'): tryname = os.path.join(directory, filename) if os.path.exists(tryname): if not os.path.isabs(directory): return os.path.abspath(tryname) return tryname if os.path.exists(filename): return os.path.abspath(filename) return None
Given an executable filename, find in the PATH or find absolute path. Args: filename: An executable filename (string) Returns: Absolute version of filename. None if filename could not be found locally, absolutely, or in PATH.
juraj-google-style
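A small demonstration assuming the function above; the results depend on what is installed on the machine running it, so the lookups are illustrative.

import sys

print(GetRealPath(sys.executable))    # already absolute, returned unchanged
print(GetRealPath('sh'))              # resolved via PATH, e.g. '/bin/sh' (None if not present)
print(GetRealPath('no-such-binary'))  # None when nothing matches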
def _fetch_events_files_on_disk(self): all_files = tf.io.gfile.listdir(self._events_directory) relevant_files = [file_name for file_name in all_files if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)] return sorted(relevant_files, key=self._obtain_file_index)
Obtains the names of debugger-related events files within the directory. Returns: The names of the debugger-related events files written to disk. The names are sorted in increasing events file index.
codesearchnet
def __init__(self, device, configs=None): self._device = device self._configs = configs
Constructor of the class. The constructor is the only place to pass in a config. If you need to change the config later, you should unregister the service instance from `ServiceManager` and register again with the new config. Args: device: the device object this service is associated with. configs: optional configuration defined by the author of the service class.
juraj-google-style
def process_tag(self, tag_proc_name, tag): tag_processor = self.tag_procs[tag_proc_name] db_entry = (tag_processor.get_name(tag), tag_processor.get_entry_type(tag), tag_processor.get_filename(tag)) self.zeal_db.insert(*db_entry) self.entry_count += 1
Process a tag with a tag processor and insert a DB entry. Args: tag_proc_name: A string key that maps to the TagProcessor to use. tag: A BeautifulSoup Tag to process.
codesearchnet
def protocol_version_to_kmip_version(value):
    if not isinstance(value, ProtocolVersion):
        return None
    if value.major == 1:
        if value.minor == 0:
            return enums.KMIPVersion.KMIP_1_0
        elif value.minor == 1:
            return enums.KMIPVersion.KMIP_1_1
        elif value.minor == 2:
            return enums.KMIPVersion.KMIP_1_2
        elif value.minor == 3:
            return enums.KMIPVersion.KMIP_1_3
        elif value.minor == 4:
            return enums.KMIPVersion.KMIP_1_4
        else:
            return None
    else:
        return None
Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent. Args: value (ProtocolVersion): A ProtocolVersion struct to be converted into a KMIPVersion enumeration. Returns: KMIPVersion: The enumeration equivalent of the struct. If the struct cannot be converted to a valid enumeration, None is returned.
codesearchnet
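A hedged usage sketch, assuming PyKMIP's ProtocolVersion(major, minor) constructor and the enums module referenced by the function above; only 1.x versions map to an enumeration.

print(protocol_version_to_kmip_version(ProtocolVersion(1, 2)))  # KMIPVersion.KMIP_1_2
print(protocol_version_to_kmip_version(ProtocolVersion(2, 0)))  # None: no 2.x mapping here
print(protocol_version_to_kmip_version('1.2'))                  # None: not a ProtocolVersion struct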
def append_paulis(self, paulis=None, pauli_labels=None): return self.insert_paulis(None, paulis=paulis, pauli_labels=pauli_labels)
Append pauli at the end. Args: paulis (Pauli): the to-be-inserted or appended pauli pauli_labels (list[str]): the to-be-inserted or appended pauli label Returns: Pauli: self
juraj-google-style
def get_account_info(self): request = self._get_request() response = request.get(self.ACCOUNT_INFO_URL) self.account.json_data = response['account'] return self.account
Get current account information. The information will then be saved in `self.account` so that you can access the information like this: >>> hsclient = HSClient() >>> acct = hsclient.get_account_info() >>> print(acct.email_address) Returns: An Account object
codesearchnet
def set_mode(self, name, value=None, default=False, disable=False): string = 'switchport mode' command = self.command_builder(string, value=value, default=default, disable=disable) return self.configure_interface(name, command)
Configures the switchport mode. Args: name (string): The interface identifier to create the logical layer 2 switchport for. The name must be the full interface name and not an abbreviated interface name (e.g. Ethernet1, not Et1) value (string): The value to set the mode to. Accepted values for this argument are access or trunk default (bool): Configures the mode parameter to its default value using the EOS CLI disable (bool): Negate the mode parameter using the EOS CLI Returns: True if the operation succeeds otherwise False.
codesearchnet
def Run(self, conf, args): try: options, args = self.parser.parse_args(args) except SystemExit as e: return e.code if options.maps: self.log.info('Setting configured maps to %s', options.maps) conf.maps = options.maps if not options.incremental: self.log.debug('performing FULL update of caches') else: self.log.debug('performing INCREMENTAL update of caches') if options.delay: self.log.info('Delaying %d seconds before executing', options.delay) time.sleep(options.delay) return self.UpdateMaps(conf, incremental=options.incremental, force_write=options.force_write, force_lock=options.force_lock)
Run the Update command. See Command.Run() for full documentation on the Run() method. Args: conf: a nss_cache.config.Config object args: a list of arguments to be parsed by this command Returns: 0 on success, nonzero on error
github-repos
def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims): if dims is None: return xla_sharding.replicate(tensor, assign_tuple_sharding=True) elif np.prod(dims) == 1: return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True) else: tile_assignment = np.arange(np.prod(dims)).reshape(dims) return xla_sharding.tile(tensor=tensor, tile_assignment=tile_assignment, assign_tuple_sharding=True)
Tags appropriate XLA sharding attribute to the dequeued tensor. The sharding attribute of the dequeued tensor will be a tuple. Args: tensor: The dequeued tensor on TPU. dims: A list of integers describing how the tensor is partitioned. Returns: The same tensor with the xla_sharding attribute.
github-repos