code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 classes) |
---|---|---|
def edit_profile(request):
(form, handled) = _handle_profile(request, 'profile')
if (handled and (not form.errors)):
messages.success(request, 'Your attendee profile was updated.')
return redirect('dashboard')
data = {'form': form}
return render(request, 'registrasion/profile_form.html', data)
|
View for editing an attendee's profile
The user must be logged in to edit their profile.
Returns:
redirect or render:
In the case of a ``POST`` request, it'll redirect to ``dashboard``,
or otherwise, it will render ``registrasion/profile_form.html``
with data::
{
"form": form, # Instance of ATTENDEE_PROFILE_FORM.
}
|
codesearchnet
|
def _snapshot_tensor(self, tensor):
snapshot_variable = self._create_or_get_tensor_values_cache(tensor.name, tensor.op.graph, tensor.shape.as_list(), tensor.dtype)
return state_ops.assign(snapshot_variable, tensor).op
|
Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.
Args:
tensor: tensor whose values will be stored in a new tf.Variable.
Returns:
An assignment operation.
|
github-repos
|
def past_stop_threshold(stop_threshold, eval_metric):
if (stop_threshold is None):
return False
if (not isinstance(stop_threshold, numbers.Number)):
raise ValueError('Threshold for checking stop conditions must be a number.')
if (not isinstance(eval_metric, numbers.Number)):
raise ValueError('Eval metric being checked against stop conditions must be a number.')
if (eval_metric >= stop_threshold):
tf.logging.info('Stop threshold of {} was passed with metric value {}.'.format(stop_threshold, eval_metric))
return True
return False
|
Return a boolean representing whether a model should be stopped.
Args:
stop_threshold: float, the threshold above which a model should stop
training.
eval_metric: float, the current value of the relevant metric to check.
Returns:
True if training should stop, False otherwise.
Raises:
ValueError: if either stop_threshold or eval_metric is not a number
|
codesearchnet
|
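A minimal usage sketch of the early-stopping check above, assuming past_stop_threshold is importable as defined; the metric values are made up for illustration.

stop_threshold = 0.95
for epoch, accuracy in enumerate([0.80, 0.91, 0.96]):
    # past_stop_threshold returns True once the metric meets or exceeds the threshold.
    if past_stop_threshold(stop_threshold, accuracy):
        print('stopping early after epoch %d' % epoch)  # fires at accuracy 0.96
        break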
def find_latest_change_point_index(metric_values: List[Union[float, int]]):
change_points_indices = find_change_points(metric_values)
change_points_indices = filter_change_points_by_median_threshold(metric_values, change_points_indices)
if not change_points_indices:
return None
change_points_indices.sort()
change_point_index = change_points_indices[-1]
if is_edge_change_point(change_point_index, len(metric_values), constants._EDGE_SEGMENT_SIZE):
logging.info('The change point %s is located at the edge of the data with an edge segment size of %s. This change point will be ignored for now, awaiting additional data. Should the change point persist after gathering more data, an alert will be raised.' % (change_point_index, constants._EDGE_SEGMENT_SIZE))
return None
return change_point_index
|
Args:
metric_values: Metric values used to run change point analysis.
Returns:
int: Rightmost change point index observed on metric_values.
|
github-repos
|
def open_connection(self, connection: str, configuration: Config, hostname: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None, port: Optional[int]=None, platform: Optional[str]=None, extras: Optional[Dict[(str, Any)]]=None, default_to_host_attributes: bool=True) -> ConnectionPlugin:
if (connection in self.connections):
raise ConnectionAlreadyOpen(connection)
self.connections[connection] = self.connections.get_plugin(connection)()
if default_to_host_attributes:
conn_params = self.get_connection_parameters(connection)
self.connections[connection].open(hostname=(hostname if (hostname is not None) else conn_params.hostname), username=(username if (username is not None) else conn_params.username), password=(password if (password is not None) else conn_params.password), port=(port if (port is not None) else conn_params.port), platform=(platform if (platform is not None) else conn_params.platform), extras=(extras if (extras is not None) else conn_params.extras), configuration=configuration)
else:
self.connections[connection].open(hostname=hostname, username=username, password=password, port=port, platform=platform, extras=extras, configuration=configuration)
return self.connections[connection]
|
Open a new connection.
If ``default_to_host_attributes`` is set to ``True`` arguments will default to host
attributes if not specified.
Raises:
AttributeError: if it's unknown how to establish a connection for the given type
Returns:
An already established connection
|
codesearchnet
|
def __le__(self, other):
if not isinstance(other, interface.DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
return isinstance(other, Never)
|
Determines if the date time values are less than or equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are less than or equal to other.
Raises:
ValueError: if other is not an instance of DateTimeValues.
|
juraj-google-style
|
def parse(self, filename):
filehandle = storage.open_vos_or_local(filename, "rb")
assert filehandle is not None, "Failed to open file {} ".format(filename)
filestr = filehandle.read()
filehandle.close()
assert filestr is not None, "File contents are None"
observations = self._parse_observation_list(filestr)
self._parse_observation_headers(filestr, observations)
sys_header = self._parse_system_header(filestr)
sources = self._parse_source_data(filestr, observations)
return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)
|
Parses a file into an AstromData structure.
Args:
filename: str
The name of the file whose contents will be parsed.
Returns:
data: AstromData
The file contents extracted into a data structure for programmatic
access.
|
juraj-google-style
|
def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor=None):
batch_size, num_images, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
nb_values_per_image = pixel_values.shape[1:].numel()
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
if not any(real_images_inds):
real_images_inds[0] = True
pixel_values = pixel_values[real_images_inds].contiguous()
if pixel_attention_mask is None:
pixel_attention_mask = torch.ones(size=[pixel_values.shape[i] for i in (0, 2, 3)], dtype=torch.bool, device=pixel_values.device)
else:
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
patch_size = self.config.vision_config.patch_size
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
image_hidden_states = image_hidden_states.last_hidden_state
image_hidden_states = self.connector(image_hidden_states)
return image_hidden_states
|
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
|
github-repos
|
def lex_index(n, k, lst):
if (len(lst) != k):
raise VisualizationError('list should have length k')
comb = list(map((lambda x: ((n - 1) - x)), lst))
dualm = sum([n_choose_k(comb[((k - 1) - i)], (i + 1)) for i in range(k)])
return int(dualm)
|
Return the lex index of a combination.
Args:
n (int): the total number of options.
k (int): The number of elements.
lst (list): list of k elements.
Returns:
int: index of the combination in lexicographic order
Raises:
VisualizationError: if length of list is not equal to k
|
codesearchnet
|
def argsort2(indexable, key=None, reverse=False):
if isinstance(indexable, dict):
vk_iter = ((v, k) for (k, v) in indexable.items())
else:
vk_iter = ((v, k) for (k, v) in enumerate(indexable))
if (key is None):
indices = [k for (v, k) in sorted(vk_iter, reverse=reverse)]
else:
indices = [k for (v, k) in sorted(vk_iter, key=(lambda vk: key(vk[0])), reverse=reverse)]
return indices
|
Returns the indices that would sort an indexable object.
This is similar to np.argsort, but it is written in pure python and works
on both lists and dictionaries.
Args:
indexable (list or dict): indexable to sort by
Returns:
list: indices: list of indices that sorts the indexable
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> # argsort works on dicts
>>> dict_ = indexable = {'a': 3, 'b': 2, 'c': 100}
>>> indices = ut.argsort2(indexable)
>>> assert list(ut.take(dict_, indices)) == sorted(dict_.values())
>>> # argsort works on lists
>>> indexable = [100, 2, 432, 10]
>>> indices = ut.argsort2(indexable)
>>> assert list(ut.take(indexable, indices)) == sorted(indexable)
>>> # argsort works on iterators
>>> indexable = reversed(range(100))
>>> indices = ut.argsort2(indexable)
>>> assert indices[0] == 99
|
codesearchnet
|
def _FindKeys(self, key, names, matches):
for name, subkey in iter(key.items()):
if name in names:
matches.append((name, subkey))
if isinstance(subkey, dict):
self._FindKeys(subkey, names, matches)
|
Searches the plist key hierarchy for keys with matching names.
If a match is found a tuple of the key name and value is added to
the matches list.
Args:
key (dict[str, object]): plist key.
names (list[str]): names of the keys to match.
matches (list[str]): keys with matching names.
|
juraj-google-style
|
def _expand_str_alias(path_cfg, alias_dict, overriding_kargs):
new_path_cfg = alias_dict[path_cfg]
new_overriding_kargs = dict(alias=path_cfg)
new_overriding_kargs.update(overriding_kargs)
return expand_path_cfg(new_path_cfg, alias_dict,new_overriding_kargs)
|
Expand a path config given as a string.
Args:
path_cfg (str): an alias
alias_dict (dict):
overriding_kargs (dict):
|
juraj-google-style
|
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
images_kwargs = AriaProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
max_size = images_kwargs.get('max_image_size', None) or self.image_processor.max_image_size
num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
|
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
|
github-repos
|
def check_error_response(self, body, status):
status_code = int(status.split(' ', 1)[0])
if (status_code >= 300):
raise errors.BackendError(body, status)
|
Raise an exception if the response from the backend was an error.
Args:
body: A string containing the backend response body.
status: A string containing the backend response status.
Raises:
BackendError if the response is an error.
|
codesearchnet
|
def hex_to_name(hexx):
for n, h in defaults.COLOURS.items():
if (len(n) > 1) and (h == hexx.upper()):
return n.lower()
return None
|
Convert hex to a color name, using matplotlib's colour names.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
str: The name of the colour, or None if not found.
|
juraj-google-style
|
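A self-contained sketch of the same reverse lookup as hex_to_name above, using matplotlib's CSS4 colour table as a stand-in for defaults.COLOURS (an assumption; the library's own colour table may differ).

from matplotlib.colors import CSS4_COLORS  # maps colour name -> '#RRGGBB' hex string

def hex_to_name_sketch(hexx):
    # Reverse lookup: find the first multi-letter name whose hex matches.
    for name, h in CSS4_COLORS.items():
        if len(name) > 1 and h.upper() == hexx.upper():
            return name.lower()
    return None

print(hex_to_name_sketch('#FF0000'))  # -> 'red'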
def _fetch_preprocessing_callback(fetch):
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if tensor_util.is_tf_type(decoded) or isinstance(decoded, composite_tensor.CompositeTensor):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocessing_callback(graph_element)
|
Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
|
github-repos
|
def string_set(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingStringSet':
namespace = Metrics.get_namespace(namespace)
return Metrics.DelegatingStringSet(MetricName(namespace, name))
|
Obtains or creates a String set metric.
String set metrics are restricted to string values.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
Returns:
A StringSet object.
|
github-repos
|
def set_colourtemp(self, colourtemp):
if not 0 <= colourtemp <= 255:
raise ValueError("The colour temperature needs to be between 0 and 255.")
payload = self.generate_payload(SET, {self.DPS_INDEX_COLOURTEMP: colourtemp})
data = self._send_receive(payload)
return data
|
Set the colour temperature of an rgb bulb.
Args:
colourtemp(int): Value for the colour temperature (0-255).
|
juraj-google-style
|
def resize_for_vision_encoder(self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
height, width = get_image_size(image, channel_dim=input_data_format)
aspect_ratio = width / height
if width >= height:
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
height = int(width / aspect_ratio)
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
elif height > width:
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
width = int(height * aspect_ratio)
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
new_size = {'height': height, 'width': width}
return self.resize(image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format)
|
Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Images to resize.
vision_encoder_max_size (`int`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred
|
github-repos
|
def update(self, properties=None, description=None):
cv = ApplicationConfiguration._props(properties=properties, description=description)
res = self.rest_client.session.patch(self.rest_self, headers={'Accept': 'application/json', 'Content-Type': 'application/json'}, json=cv)
_handle_http_errors(res)
self.json_rep = res.json()
return self
|
Update this application configuration.
To create or update a property provide its key-value
pair in `properties`.
To delete a property provide its key with the value ``None``
in properties.
Args:
properties (dict): Property values to be updated. If ``None`` the properties are unchanged.
description (str): Description for the configuration. If ``None`` the description is unchanged.
Returns:
ApplicationConfiguration: self
|
codesearchnet
|
def _adjusted_script_code(self, script):
script_code = ByteData()
if (script[0] == (len(script) - 1)):
return script
script_code += VarInt(len(script))
script_code += script
return script_code
|
Checks if the script code passed in to the sighash function is already
length-prepended.
This will break if there's a redeem script that's just a pushdata.
That won't happen in practice.
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
|
codesearchnet
|
def add(self, data, name=None):
if (name is None):
n = len(self.data)
while (('Series %d' % n) in self.data):
n += 1
name = ('Series %d' % n)
self.data[name] = data
return name
|
Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
|
codesearchnet
|
def end_of_chunk(prev_tag, tag, prev_type, type_):
chunk_end = False
if (prev_tag == 'E'):
chunk_end = True
if (prev_tag == 'S'):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'B') and (tag == 'O')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'B')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'S')):
chunk_end = True
if ((prev_tag == 'I') and (tag == 'O')):
chunk_end = True
if ((prev_tag != 'O') and (prev_tag != '.') and (prev_type != type_)):
chunk_end = True
return chunk_end
|
Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.
|
codesearchnet
|
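A quick check of the chunk-boundary logic above on IOBES-style tag pairs, assuming end_of_chunk is importable as shown.

# Previous word tagged I-PER, current word tagged O: the PER chunk has ended.
print(end_of_chunk(prev_tag='I', tag='O', prev_type='PER', type_=''))  # True
# B-PER followed by I-PER of the same type: still inside the same chunk.
print(end_of_chunk(prev_tag='B', tag='I', prev_type='PER', type_='PER'))  # False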
def dv(self, orb):
orb = orb.copy(form='cartesian')
if (self.frame == 'QSW'):
mat = to_qsw(orb).T
elif (self.frame == 'TNW'):
mat = to_tnw(orb).T
else:
mat = np.identity(3)
return (mat @ self._dv)
|
Computation of the velocity increment in the reference frame of the orbit
Args:
orb (Orbit):
Returns:
numpy.array: Velocity increment, length 3
|
codesearchnet
|
def _prepare_for_training(self, job_name=None):
super(Framework, self)._prepare_for_training(job_name=job_name)
if self.source_dir and not self.source_dir.lower().startswith('s3://'):
validate_source_dir(self.entry_point, self.source_dir)
local_code = get_config_value('local.local_code', self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
if self.source_dir is None:
self.source_dir = os.path.dirname(self.entry_point)
self.entry_point = os.path.basename(self.entry_point)
code_dir = 'file://' + self.source_dir
script = self.entry_point
else:
self.uploaded_code = self._stage_user_code_in_s3()
code_dir = self.uploaded_code.s3_prefix
script = self.uploaded_code.script_name
self._hyperparameters[DIR_PARAM_NAME] = code_dir
self._hyperparameters[SCRIPT_PARAM_NAME] = script
self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics
self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level
self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name
self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name
|
Set hyperparameters needed for training. This method will also validate ``source_dir``.
Args:
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
|
juraj-google-style
|
def _ExtractJQuery(self, jquery_raw):
data_part = ''
if (not jquery_raw):
return {}
if ('[' in jquery_raw):
(_, _, first_part) = jquery_raw.partition('[')
(data_part, _, _) = first_part.partition(']')
elif jquery_raw.startswith('//'):
(_, _, first_part) = jquery_raw.partition('{')
data_part = '{{{0:s}'.format(first_part)
elif ('({' in jquery_raw):
(_, _, first_part) = jquery_raw.partition('(')
(data_part, _, _) = first_part.rpartition(')')
if (not data_part):
return {}
try:
data_dict = json.loads(data_part)
except ValueError:
return {}
return data_dict
|
Extracts values from a JQuery string.
Args:
jquery_raw (str): JQuery string.
Returns:
dict[str, str]: extracted values.
|
codesearchnet
|
def extract_subtree(self, node):
if not isinstance(node, Node):
raise TypeError("node must be a Node")
r = self.root
self.root = node
o = copy(self)
self.root = r
return o
|
Return a copy of the subtree rooted at ``node``
Args:
``node`` (``Node``): The root of the desired subtree
Returns:
``Tree``: A copy of the subtree rooted at ``node``
|
juraj-google-style
|
def get_thread(self, thread_key):
uri = '/'.join([self.api_uri,
self.threads_suffix,
thread_key
])
return self._req('get', uri)
|
Gets a thread specified by thread_key.
Args:
thread_key: key of the thread to get.
Returns:
a thread dict
|
juraj-google-style
|
def _get_dataset_showcase_dict(self, showcase):
if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
if 'id' not in showcase:
showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])
showcase = showcase['id']
elif not isinstance(showcase, str):
raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)
if is_valid_uuid(showcase) is False:
raise HDXError('%s is not a valid showcase id!' % showcase)
return {'package_id': self.data['id'], 'showcase_id': showcase}
|
Get dataset showcase dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
dict: dataset showcase dict
|
juraj-google-style
|
def is50or60(msg, spd_ref, trk_ref, alt_ref):
def vxy(v, angle):
vx = (v * np.sin(np.radians(angle)))
vy = (v * np.cos(np.radians(angle)))
return (vx, vy)
if (not (bds50.is50(msg) and bds60.is60(msg))):
return None
h50 = bds50.trk50(msg)
v50 = bds50.gs50(msg)
if ((h50 is None) or (v50 is None)):
return 'BDS50,BDS60'
h60 = bds60.hdg60(msg)
m60 = bds60.mach60(msg)
i60 = bds60.ias60(msg)
if ((h60 is None) or ((m60 is None) and (i60 is None))):
return 'BDS50,BDS60'
m60 = (np.nan if (m60 is None) else m60)
i60 = (np.nan if (i60 is None) else i60)
XY5 = vxy((v50 * aero.kts), h50)
XY6m = vxy(aero.mach2tas(m60, (alt_ref * aero.ft)), h60)
XY6i = vxy(aero.cas2tas((i60 * aero.kts), (alt_ref * aero.ft)), h60)
allbds = ['BDS50', 'BDS60', 'BDS60']
X = np.array([XY5, XY6m, XY6i])
Mu = np.array(vxy((spd_ref * aero.kts), trk_ref))
try:
dist = np.linalg.norm((X - Mu), axis=1)
BDS = allbds[np.nanargmin(dist)]
except ValueError:
return 'BDS50,BDS60'
return BDS
|
Use reference ground speed and track angle to determine BDS50 and BDS60.
Args:
msg (String): 28 bytes hexadecimal message string
spd_ref (float): reference speed (ADS-B ground speed), kts
trk_ref (float): reference track (ADS-B track angle), deg
alt_ref (float): reference altitude (ADS-B altitude), ft
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
|
codesearchnet
|
def CompressedHistograms(self, run, tag):
accumulator = self.GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
|
Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
|
codesearchnet
|
def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):
counter = 0
for g in tqdm(self.genes):
if (custom_itasser_name_mapping and (g.id in custom_itasser_name_mapping)):
hom_id = custom_itasser_name_mapping[g.id]
if (not op.exists(op.join(homology_raw_dir, hom_id))):
hom_id = g.id
else:
hom_id = g.id
new_itasser_name = (hom_id + '_model1')
orig_itasser_dir = op.join(homology_raw_dir, hom_id)
try:
itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir, organize=True, outdir=outdir, organize_name=new_itasser_name, force_rerun=force_rerun)
except OSError:
log.debug('{}: homology model folder unavailable'.format(g.id))
continue
except IOError:
log.debug('{}: homology model unavailable'.format(g.id))
continue
if itasser_prop.structure_file:
counter += 1
else:
log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))
log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the "df_homology_models" attribute for a summary dataframe.'.format(counter))
|
Copy generated I-TASSER models from a directory to the GEM-PRO directory.
Args:
homology_raw_dir (str): Root directory of I-TASSER folders.
custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
Input a dict of {model_gene: ITASSER_folder}.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
|
codesearchnet
|
def get_node(self, role: str, default=None) -> BioCNode:
return next((node for node in self.nodes if node.role == role), default)
|
Get the first node with role
Args:
role: role
default: node returned instead of raising StopIteration
Returns:
the first node with role
|
juraj-google-style
|
def create_from_binary(cls, mft_config, binary_data, entry_number):
bin_view = memoryview(binary_data)
entry = None
if (bin_view[0:4] != b'\x00\x00\x00\x00'):
try:
header = MFTHeader.create_from_binary(mft_config.ignore_signature_check, bin_view[:MFTHeader.get_representation_size()])
except HeaderError as e:
e.update_entry_number(entry_number)
e.update_entry_binary(binary_data)
raise
entry = cls(header, _defaultdict(list))
if (header.mft_record != entry_number):
_MOD_LOGGER.warning("The MFT entry number doesn't match. %d != %d", entry_number, header.mft_record)
if (len(binary_data) != header.entry_alloc_len):
_MOD_LOGGER.error('Expected MFT size is different than entry size.')
raise EntryError(f'Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).', binary_data, entry_number)
if mft_config.apply_fixup_array:
apply_fixup_array(bin_view, header.fx_offset, header.fx_count, header.entry_alloc_len)
entry._load_attributes(mft_config, bin_view[header.first_attr_offset:])
bin_view.release()
return entry
|
Creates a MFTEntry from a binary stream. It correctly process
the binary data extracting the MFTHeader, all the attributes and the
slack information from the binary stream.
The binary data WILL be changed to apply the fixup array.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
binary_data (bytearray) - A binary stream with the data to extract.
This has to be writeable and support the memoryview call.
entry_number (int) - The entry number for this entry
Returns:
MFTEntry: The new MFTEntry object, or None if the entry is empty.
|
codesearchnet
|
def near(point, dist, points):
for cmpt in points:
if (haversine(point, cmpt) <= dist):
return True
return False
|
Determine if the given point is within dist of any of points.
Args:
point ((float,float)): A latitude, longitude float tuple.
dist (int): A distance in mm (base units).
points (list): A list of latitude, longitude float tuples to compare against.
|
codesearchnet
|
def __init__(self, structure_matcher=StructureMatcher(
comparator=ElementComparator()), symprec=None):
self.symprec = symprec
self.structure_list = defaultdict(list)
if isinstance(structure_matcher, dict):
self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
else:
self.structure_matcher = structure_matcher
|
Remove duplicate structures based on the structure matcher
and symmetry (if symprec is given).
Args:
structure_matcher: Provides a structure matcher to be used for
structure comparison.
symprec: The precision in the symmetry finder algorithm if None (
default value), no symmetry check is performed and only the
structure matcher is used. A recommended value is 1e-5.
|
juraj-google-style
|
def setDocumentedBy(self, documented_pid, documenting_pid):
self._check_initialized()
documented_id = self.getObjectByPid(documented_pid)
documenting_id = self.getObjectByPid(documenting_pid)
self.add((documented_id, CITO.isDocumentedBy, documenting_id))
|
Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
|
codesearchnet
|
def cp(src, dst):
if isdir(src):
if isdir(dst):
rm(dst)
shutil.copytree(src, dst)
elif isfile(src):
shutil.copy(src, dst)
else:
raise IOError("Source '{0}' not found".format(src))
|
Copy a file or directory.
If source is a directory, this recursively copies the directory
and its contents. If the destination is a directory, then this
creates a copy of the source in the destination directory with the
same basename.
If the destination already exists, this will attempt to overwrite
it.
Arguments:
src (string): path to the source file or directory.
dst (string): path to the destination file or directory.
Raises:
IOError: if source does not exist.
|
codesearchnet
|
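An illustrative run of cp above on a throwaway directory (paths are made up for the example; assumes cp and its isdir/isfile/rm helpers are importable from the same module).

import os
import tempfile

src = tempfile.mkdtemp()
open(os.path.join(src, 'a.txt'), 'w').close()
dst = src + '_copy'
cp(src, dst)              # recursive copy, since src is a directory
print(os.listdir(dst))    # ['a.txt']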
def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) -> torch.Tensor:
if weights is not None:
weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
else:
return input_tensor.mean(dim=dim)
|
Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
Args:
input_tensor (`torch.FloatTensor`):
Input tensor, of which the average must be computed.
weights (`torch.FloatTensor`, *optional*):
Weights tensor, of the same shape as `input_tensor`.
dim (`int`, *optional*):
The dim along which to average `input_tensor`.
Returns:
`torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
|
github-repos
|
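A small numeric sketch of the masking behaviour described above (requires PyTorch; assumes weighted_average is importable as shown).

import torch

values = torch.tensor([1.0, float('nan'), 3.0])
weights = torch.tensor([1.0, 0.0, 1.0])
# The NaN entry has weight 0, so it contributes 0 instead of poisoning the sum.
print(weighted_average(values, weights))  # tensor(2.)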
def transform_table(self, table, table_meta, missing=None):
if (missing is None):
missing = self.missing
else:
self.missing = missing
warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning)
content = {}
columns = []
table_name = table_meta['name']
for field in table_meta['fields']:
column_name = field['name']
if (missing and table[column_name].isnull().any()):
null_transformer = transformers.NullTransformer(field)
clean_column = null_transformer.fit_transform(table[column_name])
null_name = ('?' + column_name)
columns.append(null_name)
content[null_name] = clean_column[null_name].values
column = clean_column[column_name]
else:
column = table[column_name].to_frame()
transformer = self.transformers[(table_name, column_name)]
content[column_name] = transformer.transform(column)[column_name].values
columns.append(column_name)
return pd.DataFrame(content, columns=columns)
|
Apply the stored transformers to `table`.
Args:
table(pandas.DataFrame): Contents of the table to be transformed.
table_meta(dict): Metadata for the given table.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
pandas.DataFrame: Transformed table.
|
codesearchnet
|
def sort_response(response: Dict[str, Any]) -> OrderedDict:
root_order = ["jsonrpc", "result", "error", "id"]
error_order = ["code", "message", "data"]
req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))
if "error" in response:
req["error"] = OrderedDict(
sorted(response["error"].items(), key=lambda k: error_order.index(k[0]))
)
return req
|
Sort the keys in a JSON-RPC response object.
This has no effect other than making it nicer to read. Useful in Python 3.5 only;
dictionaries already preserve insertion order in newer Python versions.
Example::
>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
{"jsonrpc": "2.0", "result": 5, "id": 2}
Args:
response: Deserialized JSON-RPC response.
Returns:
The same response, sorted in an OrderedDict.
|
juraj-google-style
|
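A runnable sketch of the key ordering, assuming sort_response is importable as defined above.

import json

response = {'id': 2, 'result': 5, 'jsonrpc': '2.0'}
print(json.dumps(sort_response(response)))
# {"jsonrpc": "2.0", "result": 5, "id": 2}

error_response = {'id': 1, 'error': {'message': 'oops', 'code': -32000}, 'jsonrpc': '2.0'}
print(json.dumps(sort_response(error_response)))
# {"jsonrpc": "2.0", "error": {"code": -32000, "message": "oops"}, "id": 1}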
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def job_monitor(job, interval=None, monitor_async=False, quiet=False, output=sys.stdout):
if (interval is None):
_interval_set = False
interval = 2
else:
_interval_set = True
if _NOTEBOOK_ENV:
if monitor_async:
try:
import ipywidgets as widgets
except ImportError:
raise ImportError('These functions need ipywidgets. Run "pip install ipywidgets" before.')
from qiskit.tools.jupyter.jupyter_magics import _html_checker
style = 'font-size:16px;'
header = "<p style='{style}'>Job Status: %s </p>".format(style=style)
status = widgets.HTML(value=(header % job.status().value))
display(status)
thread = threading.Thread(target=_html_checker, args=(job, interval, status, header))
thread.start()
else:
_text_checker(job, interval, _interval_set, quiet=quiet, output=output)
else:
if monitor_async:
raise QiskitError('monitor_async only available in Jupyter notebooks.')
_text_checker(job, interval, _interval_set, quiet=quiet, output=output)
|
Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
monitor_async (bool): Monitor asynchronously (in Jupyter only).
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
Raises:
QiskitError: When trying to run async outside of Jupyter
ImportError: ipywidgets not available for notebook.
|
codesearchnet
|
def set_filename_and_line_from_caller(self, offset: int=0) -> int:
retcode = self.SUCCESS
frame = inspect.currentframe()
if not frame:
return self.FAILURE
frame = cast(types.FrameType, frame)
for _ in range(offset + 1):
parent = frame.f_back
if parent is None:
retcode = self.HEURISTIC_USED
break
parent = cast(types.FrameType, parent)
frame = parent
self.filename = frame.f_code.co_filename
self.lineno = cast(int, frame.f_lineno)
return retcode
|
Set filename and line using the caller's stack frame.
If the requested stack information is not available, a heuristic may
be applied and self.HEURISTIC USED will be returned. If the heuristic
fails then no change will be made to the filename and lineno members
(None by default) and self.FAILURE will be returned.
Args:
offset: Integer. If 0, the caller's stack frame is used. If 1,
the caller's caller's stack frame is used. Larger values are
permissible but if out-of-range (larger than the number of stack
frames available) the outermost stack frame will be used.
Returns:
TraceableObject.SUCCESS if appropriate stack information was found,
TraceableObject.HEURISTIC_USED if the offset was larger than the stack,
and TraceableObject.FAILURE if the stack was empty.
|
github-repos
|
def are_equal(self, mol1, mol2):
b1 = set(self._get_bonds(mol1))
b2 = set(self._get_bonds(mol2))
return b1 == b2
|
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
|
juraj-google-style
|
def load_yaml_config(conf_file):
global g_config
with open(conf_file) as fp:
g_config = util.yaml_load(fp)
src_dir = get_path('src_dir', None)
if src_dir is not None:
sys.path.insert(0, src_dir)
for cmd in get('commands', []):
_import(cmd)
|
Load a YAML configuration.
This will not update the configuration but replace it entirely.
Args:
conf_file (str):
Path to the YAML config. This function will not check the file name
or extension and will just crash if the given file does not exist or
is not a valid YAML file.
|
juraj-google-style
|
def inspect_image(self, image):
return self._result(self._get(self._url('/images/{0}/json', image)), True)
|
Get detailed information about an image. Similar to the ``docker
inspect`` command, but only for images.
Args:
image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
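A hedged usage sketch of inspect_image through docker-py's low-level client (needs a running Docker daemon; the image tag is hypothetical).

import docker

api = docker.APIClient()                  # low-level client that exposes inspect_image
info = api.inspect_image('ubuntu:latest')
print(info['Id'])                         # content-addressable image ID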
def initial_value(self):
return self._initial_value
|
Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
|
github-repos
|
def __init__(self, choices=None, validator=None, **kwargs):
self.choices = choices
subvalidator = validator or String()
self.validator = List(validator=subvalidator)
for choice in self.choices:
subvalidator.Validate(choice)
super(MultiChoice, self).__init__(**kwargs)
|
Create a multichoice object and validate choices.
Args:
choices: list of available choices
validator: validator to use for each of the list *items*; the validator for
the top level is a list.
**kwargs: passed through to parent class.
|
juraj-google-style
|
def load_model_using_search_path(
self, filename, model, search_path, is_main_model=False,
encoding='utf8', add_to_local_models=True):
if (model):
self.update_model_in_repo_based_on_filename(model)
for the_path in search_path:
full_filename = join(the_path, filename)
if exists(full_filename):
the_metamodel = \
MetaModelProvider.get_metamodel(model, full_filename)
return self.load_model(the_metamodel,
full_filename,
is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models)
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename)
|
Add a new model to all relevant objects.
Args:
filename: model file to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
search_path: list of search directories.
Returns:
the loaded model
|
juraj-google-style
|
def generate_func_call(name, args=None, kwargs=None):
all_args = []
if args:
all_args.extend(args)
if kwargs:
all_args.extend('{}={}'.format(k, v)
for k, v in kwargs if v is not None)
return '{}({})'.format(name, ', '.join(all_args))
|
Generates code to call a function.
Args:
name (str): The function name.
args (list[str]): Each positional argument.
kwargs (list[tuple]): Each tuple is (arg: str, value: str). If
value is None, then the keyword argument is omitted. Otherwise,
if the value is not a string, then str() is called on it.
Returns:
str: Code to call a function.
|
juraj-google-style
|
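A quick sketch of the code generator above (the function name and arguments are made up; assumes generate_func_call is importable).

call = generate_func_call(
    'requests.get',
    args=["'https://example.com'"],
    kwargs=[('timeout', '10'), ('verify', None)],  # None values are dropped
)
print(call)  # requests.get('https://example.com', timeout=10)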
def metadata(self) -> Dict[str, Any]:
return self._metadata
|
Metadata of this field.
Metadata is defined as a dict type, so we can add multiple annotations
to a field. For example:
userdata = field.metadata.get('userdata', None)
Returns:
Metadata of this field as a dict.
|
github-repos
|
def eval(x):
return get_value(to_dense(x))
|
Evaluates the value of a variable.
Args:
x: A variable.
Returns:
A Numpy array.
Examples:
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
... dtype='float32')
>>> tf.keras.backend.eval(kvar)
array([[1., 2.],
[3., 4.]], dtype=float32)
|
github-repos
|
def minimum(x1, x2, output_shape=None, name=None):
output_shape = convert_to_shape(output_shape)
with tf.name_scope(name, default_name='minimum'):
(x1, x2) = binary_arguments_to_tensors(x1, x2)
return MinMaxOperation(tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)).outputs[0]
|
Binary minimum with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
|
codesearchnet
|
def compare_with_existing(self, region='us-east-1', onetime=False):
pipelines = self.get_existing_pipelines()
pipeline_id = None
found = False
for pipeline in pipelines:
correct_app_and_region = (pipeline['application'] == self.app_name) and (region in pipeline['name'])
if onetime:
onetime_str = "(onetime-{})".format(self.environments[0])
if correct_app_and_region and onetime_str in pipeline['name']:
found = True
elif correct_app_and_region:
found = True
if found:
self.log.info('Existing pipeline found - %s', pipeline['name'])
pipeline_id = pipeline['id']
break
else:
self.log.info('No existing pipeline found')
return pipeline_id
|
Compare desired pipeline with existing pipelines.
Args:
region (str): Region of desired pipeline.
onetime (bool): Looks for different pipeline if Onetime
Returns:
str: pipeline_id if existing, None if not.
|
juraj-google-style
|
def _send_notification(self, handle, value):
value_len = len(value)
value = bytes(value)
payload = struct.pack(('<BHB%ds' % value_len), 255, handle, value_len, value)
response = self._send_command(2, 5, payload)
(result,) = unpack('<H', response.payload)
if (result != 0):
return (False, {'reason': 'Error code from BLED112 notifying a value', 'code': result, 'handle': handle, 'value': value})
return (True, None)
|
Send a notification to all connected clients on a characteristic
Args:
handle (int): The handle we wish to notify on
value (bytearray): The value we wish to send
|
codesearchnet
|
def GetMetadata(self, metadata_key='', recursive=True, timeout=None, retry=True):
return self._HandleMetadataUpdate(metadata_key=metadata_key, recursive=recursive, wait=False, timeout=timeout, retry=retry)
|
Retrieve the contents of metadata server for a metadata key.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
timeout: int, timeout in seconds for returning metadata output.
retry: bool, True if we should retry on failure.
Returns:
json, the deserialized contents of the metadata server or None if error.
|
codesearchnet
|
def __init__(self, name, pivot):
super(XLACompileContext, self).__init__()
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
|
Builds a new XLACompileContext.
Args:
name: a unique name for the context, used to populate the
`_xla_compile_id` attribute.
pivot: a pivot node. Nodes in the XLACompileContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
|
github-repos
|
def get_html_content(id_: str) -> str:
try:
node = nodes.Node.from_id(id_)
return node.inner_html
except Exception as e:
epy.reraise(e, prefix='`ecolab.inspect` internal error. Please report an issue.\n')
|
Returns the inner content of the block id.
Is called the first time a block is expanded.
Args:
id_: Id of the block to load
Returns:
The html to add.
|
github-repos
|
def remove(self, processor_identity):
with self._condition:
processor_types = self._identities.get(processor_identity)
if (processor_types is None):
LOGGER.warning('transaction processor with identity %s tried to unregister but was not registered', processor_identity)
return
for processor_type in processor_types:
if (processor_type not in self._processors):
LOGGER.warning('processor type %s not a known processor type but is associated with identity %s', processor_type, processor_identity)
continue
self._processors[processor_type].remove_processor(processor_identity=processor_identity)
if (not self._processors[processor_type]):
del self._processors[processor_type]
|
Removes all of the Processors for
a particular transaction processor zeromq identity.
Args:
processor_identity (str): The zeromq identity of the transaction
processor.
|
codesearchnet
|
def log_error(cls, msg):
cls.error_logger.error(msg)
cls.debug_logger.debug(msg)
|
Logs the provided error message to both the error logger and the debug logger
instances.
Args:
msg: `str`. The error message to log.
|
juraj-google-style
|
def dist_point_line(p, l1, l2):
cross_prod = np.cross((l2 - l1), (p - l1))
return (np.linalg.norm(cross_prod) / np.linalg.norm((l2 - l1)))
|
Compute the orthogonal distance from the point p to the line that goes
through the points l1 and l2.
Args:
p, l1, l2 : iterable
point
indices 0, 1, 2 corresponding to cartesian coordinates
|
codesearchnet
|
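A numeric sanity check of the formula above (requires NumPy; assumes dist_point_line is importable as shown).

import numpy as np

p = np.array([0.0, 1.0, 0.0])    # one unit above the x-axis
l1 = np.array([0.0, 0.0, 0.0])
l2 = np.array([1.0, 0.0, 0.0])   # the line through l1 and l2 is the x-axis
print(dist_point_line(p, l1, l2))  # 1.0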
def check_column(state, name, missing_msg=None, expand_msg=None):
if (missing_msg is None):
missing_msg = "We expected to find a column named `{{name}}` in the result of your query, but couldn't."
if (expand_msg is None):
expand_msg = 'Have another look at your query result. '
msg_kwargs = {'name': name}
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
if (name not in sol_res):
raise BaseException(('name %s not in solution column names' % name))
if (name not in stu_res):
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(append_message={'msg': expand_msg, 'kwargs': msg_kwargs}, student_result={name: stu_res[name]}, solution_result={name: sol_res[name]})
|
Zoom in on a particular column in the query result, by name.
After zooming in on a column, which is represented as a single-column query result,
you can use ``has_equal_value()`` to verify whether the column in the solution query result
matches the column in student query result.
Args:
name: name of the column to zoom in on.
missing_msg: if specified, this overrides the automatically generated feedback
message in case the column is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# fails, since no column named id in student result
Ex().check_column('id')
# passes, since a column named name is in student_result
Ex().check_column('name')
|
codesearchnet
|
def setup_session(self, server, hooks, graph_default_context):
if self.execution_type == "distributed":
session_creator = tf.train.ChiefSessionCreator(
scaffold=self.scaffold,
master=server.target,
config=self.session_config,
checkpoint_dir=None,
checkpoint_filename_with_path=None
)
self.monitored_session = tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks,
stop_grace_period_secs=120
)
if self.tf_session_dump_dir != "":
self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)
else:
self.monitored_session = tf.train.SingularMonitoredSession(
hooks=hooks,
scaffold=self.scaffold,
master='',
config=self.session_config,
checkpoint_dir=None
)
if graph_default_context:
graph_default_context.__exit__(None, None, None)
self.graph.finalize()
self.monitored_session.__enter__()
self.session = self.monitored_session._tf_sess()
|
Creates and then enters the session for this model (finalizes the graph).
Args:
server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
graph_default_context: The graph as_default() context that we are currently in.
|
juraj-google-style
|
def add_custom_column_spec(self, spec: ColumnSpec) -> 'ColumnSpecsBuilder':
self._specs.append(spec)
return self
|
Add a custom :class:`.ColumnSpec` to the builder.
Use this method when you need complete control over the :class:`.ColumnSpec`,
including custom value extraction and type handling.
Args:
spec: A :class:`.ColumnSpec` instance defining the column name, type,
value extraction, and optional SQL type casting.
Returns:
Self for method chaining
Examples:
Custom text column from chunk metadata:
>>> builder.add_custom_column_spec(
... ColumnSpec.text(
... name="source_and_id",
... value_fn=lambda chunk: f"{chunk.metadata.get('source')}_{chunk.id}"
... )
... )
|
github-repos
|
def load(self, fobj, index=None):
if index is None:
index = self._get_tab_index()
page = self.pages[index]
if fobj is None:
return
if not isinstance(fobj, tuple(page.clss_load)):
raise RuntimeError('Object to load must be in {0!s} (not a {1!s})'.format(
[x.__name__ for x in page.clss_load], fobj.__class__.__name__))
page.editor.load(fobj)
self._update_gui_text_tabs()
|
Loads given DataFile object. **tolerant with None**
Args:
fobj: object of one of accepted classes
index: tab index to load fobj into. If not passed, loads into current tab
|
juraj-google-style
|
def horizontal_infrared_radiation_intensity(self, value=9999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `horizontal_infrared_radiation_intensity`'.format(value))
if (value < 0.0):
raise ValueError('value need to be greater or equal 0.0 for field `horizontal_infrared_radiation_intensity`')
self._horizontal_infrared_radiation_intensity = value
|
Corresponds to IDD Field `horizontal_infrared_radiation_intensity`
Args:
value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def run(self, circuit):
name = circuit.name
dag = circuit_to_dag(circuit)
del circuit
for passset in self.working_list:
for pass_ in passset:
dag = self._do_pass(pass_, dag, passset.options)
circuit = dag_to_circuit(dag)
circuit.name = name
return circuit
|
Run all the passes on a QuantumCircuit
Args:
circuit (QuantumCircuit): circuit to transform via all the registered passes
Returns:
QuantumCircuit: Transformed circuit.
|
codesearchnet
|
def __init__(self, func, name=None, indexed=None,
repeated=None, verbose_name=None):
super(ComputedProperty, self).__init__(name=name, indexed=indexed,
repeated=repeated,
verbose_name=verbose_name)
self._func = func
|
Constructor.
Args:
func: A function that takes one argument, the model instance, and returns
a calculated value.
|
juraj-google-style
|
def modified_files(root, tracked_only=False, commit=None):
assert os.path.isabs(root), ('Root has to be absolute, got: %s' % root)
command = ['hg', 'status']
if commit:
command.append(('--change=%s' % commit))
status_lines = subprocess.check_output(command).decode('utf-8').split(os.linesep)
modes = ['M', 'A']
if (not tracked_only):
modes.append('\\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(status_lines, ('(?P<mode>%s) (?P<filename>.+)' % modes_str), groups=('filename', 'mode'))
return dict(((os.path.join(root, filename), mode) for (filename, mode) in modified_file_status))
|
Returns the files that have been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
hg status.
|
codesearchnet
|
def parse_config_file(config_file, skip_unknown=False):
for reader, existence_check in _FILE_READERS:
if existence_check(config_file):
with reader(config_file) as f:
parse_config(f, skip_unknown=skip_unknown)
return
raise IOError('Unable to open file: {}'.format(config_file))
|
Parse a Gin config file.
Args:
config_file: The path to a Gin config file.
skip_unknown: A boolean indicating whether unknown configurables and imports
should be skipped instead of causing errors (alternatively a list of
configurable names to skip if unknown). See `parse_config` for additional
details.
Raises:
IOError: If `config_file` cannot be read using any registered file reader.
|
juraj-google-style
|
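A typical call to parse_config_file through Gin's public API (the config path is hypothetical).

import gin

# Reads and applies bindings from the file; raises IOError if no registered
# file reader can open the path.
gin.parse_config_file('configs/train.gin')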
def ParseOptions(self, options):
self._ParseInformationalOptions(options)
self._verbose = getattr(options, 'verbose', False)
self._output_filename = getattr(options, 'write', None)
argument_helper_names = ['process_resources', 'storage_file']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
if not self._storage_file_path:
raise errors.BadConfigOption('Missing storage file option.')
if not os.path.isfile(self._storage_file_path):
raise errors.BadConfigOption(
'No such storage file: {0:s}.'.format(self._storage_file_path))
compare_storage_file_path = self.ParseStringOption(
options, 'compare_storage_file')
if compare_storage_file_path:
if not os.path.isfile(compare_storage_file_path):
raise errors.BadConfigOption(
'No such storage file: {0:s}.'.format(compare_storage_file_path))
self._compare_storage_file_path = compare_storage_file_path
self.compare_storage_information = True
self._output_format = self.ParseStringOption(options, 'output_format')
if self._output_filename:
if os.path.exists(self._output_filename):
raise errors.BadConfigOption(
'Output file already exists: {0:s}.'.format(self._output_filename))
output_file_object = open(self._output_filename, 'wb')
self._output_writer = tools.FileObjectOutputWriter(output_file_object)
self._EnforceProcessMemoryLimit(self._process_memory_limit)
|
Parses the options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
juraj-google-style
|
def genome_name_from_fasta_path(fasta_path):
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename)
|
Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
|
juraj-google-style
|
async def remove(self, *, node_id: str, force: bool=False) -> Mapping[(str, Any)]:
params = {'force': force}
response = (await self.docker._query_json('nodes/{node_id}'.format(node_id=node_id), method='DELETE', params=params))
return response
|
Remove a node from a swarm.
Args:
node_id: The ID or name of the node
|
codesearchnet
|
def __init__(self, options):
self._options = options
self._tasks = {}
self._task_lock = threading.RLock()
|
Constructor.
Args:
options (gax.BundleOptions): configures strategy this instance
uses when executing bundled functions.
|
juraj-google-style
|
def decompress_decoder_2d(x, hparams, name=None):
return decompress_decoder(x, hparams,
strides=(2, 2),
kernel=(hparams.kernel_size, hparams.kernel_size),
name=name)
|
Decoder that decompresses 2-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, compress_height, compress_width, channels].
hparams: HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width, hparams.hidden_size].
|
juraj-google-style
|
def _NormalizedVolumeIdentifiers(
self, volume_system, volume_identifiers, prefix='v'):
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
if isinstance(volume_identifier, int):
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
elif not volume_identifier.startswith(prefix):
try:
volume_identifier = int(volume_identifier, 10)
volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
except (TypeError, ValueError):
pass
try:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
except KeyError:
volume = None
if not volume:
raise errors.ScannerError(
'Volume missing for identifier: {0:s}.'.format(volume_identifier))
normalized_volume_identifiers.append(volume_identifier)
return normalized_volume_identifiers
|
Normalizes volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
volume_identifiers (list[int|str]): allowed volume identifiers, formatted
as an integer or string with prefix.
prefix (Optional[str]): volume identifier prefix.
Returns:
list[str]: volume identifiers with prefix.
Raises:
ScannerError: if the volume identifier is not supported or no volume
could be found that corresponds with the identifier.
|
juraj-google-style
|
def get_device_name():
return context().device_name
|
Get the device name for the current thread.
Returns:
The device name for the current thread.
|
github-repos
|
def extension_to_message(extension: message.Message, message_cls: Type[_T]) -> _T:
msg = message_cls()
add_extension_to_message(extension, msg)
return msg
|
Serializes a provided FHIR extension into a message of type message_cls.
This function is a convenience wrapper around add_extension_to_message.
Args:
extension: The FHIR extension to serialize.
message_cls: The type of protobuf message to serialize extension to.
Returns:
A message of type message_cls.
|
github-repos
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
event = event_values.get('event', None)
if event:
event_values['event_map'] = self.EVENT_NAMES.get(event, 'Unknown')
category = event_values.get('cat', None)
if category:
event_values['category_map'] = self.CATEGORY_NAMES.get(
category, 'Unknown')
action = event_values.get('action0', None)
if action:
event_values['action0_map'] = self.ACTION_0_NAMES.get(action, 'Unknown')
action = event_values.get('action1', None)
if action:
event_values['action1_map'] = self.ACTION_1_2_NAMES.get(
action, 'Unknown')
action = event_values.get('action2', None)
if action:
event_values['action2_map'] = self.ACTION_1_2_NAMES.get(
action, 'Unknown')
return self._ConditionalFormatMessages(event_values)
|
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def convert_dict_to_compatible_tensor(values, targets):
result = {}
for (key, value) in sorted(values.items()):
result[key] = _convert_to_compatible_tensor(value, targets[key], error_prefix=("Can't convert %r" % key))
return result
|
Converts dict `values` in tensors that are compatible with `targets`.
Args:
values: A dict to objects to convert with same keys as `targets`.
targets: A dict returned by `parse_tensor_info_map`.
Returns:
A map with the same keys as `values` but values converted into
Tensor/SparseTensors that can be fed into `protomap`.
Raises:
TypeError: If it fails to convert.
|
codesearchnet
|
def extract_report_spec(service, label_is_supported=label_descriptor.KnownLabels.is_supported, metric_is_supported=metric_descriptor.KnownMetrics.is_supported):
resource_descs = service.monitoredResources
labels_dict = {}
logs = set()
if service.logging:
logs = _add_logging_destinations(service.logging.producerDestinations, resource_descs, service.logs, labels_dict, label_is_supported)
metrics_dict = {}
monitoring = service.monitoring
if monitoring:
for destinations in (monitoring.consumerDestinations, monitoring.producerDestinations):
_add_monitoring_destinations(destinations, resource_descs, service.metrics, metrics_dict, metric_is_supported, labels_dict, label_is_supported)
return (logs, metrics_dict.keys(), labels_dict.keys())
|
Obtains the used logs, metrics and labels from a service.
label_is_supported and metric_is_supported are filter functions used to
determine if label_descriptors or metric_descriptors found in the service
are supported.
Args:
service (:class:`endpoints_management.gen.servicecontrol_v1_messages.Service`):
a service instance
label_is_supported (:func): determines if a given label is supported
metric_is_supported (:func): determines if a given metric is supported
Returns:
tuple: (
logs (set[string]), # the logs to report to
metrics (list[string]), # the metrics to use
labels (list[string]) # the labels to add
)
|
codesearchnet
|
def remove_handler(self, name):
index = None
for (i, h) in enumerate(self.capture_handlers):
if (h['name'] == name):
index = i
if (index is not None):
self.capture_handlers[index]['logger'].close()
del self.capture_handlers[index]
|
Remove a handler given a name
Note, if multiple handlers have the same name the last matching
instance in the handler list will be removed.
Args:
name:
The name of the handler to remove
|
codesearchnet
|
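A self-contained sketch of the handler bookkeeping that remove_handler above relies on; CaptureManager and FakeLogger are hypothetical stand-ins for the surrounding class and its logger objects (only a close() method is assumed).
class FakeLogger:
    def close(self):
        print('logger closed')

class CaptureManager:
    def __init__(self):
        # each handler entry is a dict with at least 'name' and 'logger'
        self.capture_handlers = []

    def add_handler(self, name, logger):
        self.capture_handlers.append({'name': name, 'logger': logger})

    def remove_handler(self, name):
        index = None
        for i, h in enumerate(self.capture_handlers):
            if h['name'] == name:
                index = i  # keep scanning so the last match wins
        if index is not None:
            self.capture_handlers[index]['logger'].close()
            del self.capture_handlers[index]

manager = CaptureManager()
manager.add_handler('audit', FakeLogger())
manager.remove_handler('audit')  # prints "logger closed"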
def from_string(cls, string, format_=None, fps=None, **kwargs):
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
|
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
|
codesearchnet
|
def get_block(self, parent, config='running_config'):
try:
parent = r'^%s$' % parent
return self.node.section(parent, config=config)
except TypeError:
return None
|
Scans the config and returns a block of code
Args:
parent (str): The parent string to search the config for and
return the block
config (str): A text config string to be searched. Default
is to search the running-config of the Node.
Returns:
A string object that represents the block from the config. If
the parent string is not found, then this method will
return None.
|
juraj-google-style
|
def _mark_func_graph_as_unsaveable(graph, learning_phase):
if graph.building_function and is_placeholder(learning_phase):
graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')
|
Mark func graph as unsaveable due to use of symbolic keras learning phase.
Functions that capture the symbolic learning phase cannot be exported to
SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised
if it is exported.
Args:
graph: Graph or FuncGraph object.
learning_phase: Learning phase placeholder or int defined in the graph.
|
github-repos
|
def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:
boxes = center_to_corners_format(boxes)
height, width = image_size
boxes = boxes * torch.tensor([[width, height, width, height]])
return boxes
|
Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1]
to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates.
Args:
boxes (torch.Tensor): Bounding boxes in YOLO format
image_size (Tuple[int, int]): Image size in format (height, width)
Returns:
torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)
|
github-repos
|
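A minimal sketch of the conversion above with center_to_corners_format written out explicitly (in the original it is imported from the transformers image utilities); the sample box and image size are illustrative.
import torch

def center_to_corners_format(boxes):
    # (x_center, y_center, width, height) -> (x_min, y_min, x_max, y_max)
    x_c, y_c, w, h = boxes.unbind(-1)
    return torch.stack([x_c - 0.5 * w, y_c - 0.5 * h,
                        x_c + 0.5 * w, y_c + 0.5 * h], dim=-1)

def convert_bbox_yolo_to_pascal(boxes, image_size):
    boxes = center_to_corners_format(boxes)
    height, width = image_size
    return boxes * torch.tensor([[width, height, width, height]])

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])       # one box in YOLO format
print(convert_bbox_yolo_to_pascal(boxes, (480, 640)))
# => tensor([[256., 144., 384., 336.]])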
def _ReadFormatDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
if is_member:
error_message = 'data type not supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object = self._ReadLayoutDataTypeDefinition(definitions_registry, definition_values, data_types.FormatDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_FORMAT)
definition_object.metadata = definition_values.get('metadata', {})
attributes = definition_values.get('attributes', None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(self._SUPPORTED_ATTRIBUTES_FORMAT)
if unsupported_attributes:
error_message = 'unsupported attributes: {0:s}'.format(', '.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
if (byte_order not in definitions.BYTE_ORDERS):
error_message = 'unsupported byte-order attribute: {0!s}'.format(byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
return definition_object
|
Reads a format data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FormatDefinition: format definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
|
codesearchnet
|
def GetIndentLevel(line):
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
|
Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
|
juraj-google-style
|
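A self-contained sketch of the same indentation logic; Match is assumed to be a thin wrapper around re.match, as in cpplint-style helpers.
import re

def Match(pattern, s):
    return re.match(pattern, s)

def GetIndentLevel(line):
    indent = Match(r'^( *)\S', line)
    return len(indent.group(1)) if indent else 0

print(GetIndentLevel('    return 0'))  # => 4
print(GetIndentLevel(''))              # => 0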
def add_payload(self, key, val, append=False):
self._request.add_payload(key, val, append)
|
Add a key value pair to payload for this request.
.. Note:: For ``_search`` you can pass a search argument. (e.g. _search?summary=1.1.1.1).
Args:
key (string): The payload key
val (string): The payload value
append (bool): Indicate whether the value should be appended
|
juraj-google-style
|
def to_dict(self):
return dict(addr=self.addr, protocol=self.protocol, weight=self.weight, last_checked=self.last_checked)
|
convert detailed proxy info into a dict
Returns:
dict: A dict with four keys: ``addr``, ``protocol``,
``weight`` and ``last_checked``
|
codesearchnet
|
def add_redistribution(self, protocol, route_map_name=None):
protocols = ['bgp', 'rip', 'static', 'connected']
if protocol not in protocols:
        raise ValueError('redistributed protocol must be '
                         'bgp, connected, rip or static')
if route_map_name is None:
cmd = 'redistribute {}'.format(protocol)
else:
cmd = 'redistribute {} route-map {}'.format(protocol,
route_map_name)
return self.configure_ospf(cmd)
|
Adds a protocol redistribution to OSPF
Args:
protocol (str): protocol to redistribute
route_map_name (str): route-map to be used to
filter the protocols
Returns:
bool: True if the command completes successfully
Raises:
ValueError: This will be raised if the protocol passed is not one
of the following: [rip, bgp, static, connected]
|
juraj-google-style
|
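A minimal sketch of the validation and command construction in add_redistribution above, with configure_ospf stubbed to echo the command it receives; OspfStub is a hypothetical stand-in for the real API class.
class OspfStub:
    def configure_ospf(self, cmd):
        print('sending:', cmd)
        return True

    def add_redistribution(self, protocol, route_map_name=None):
        protocols = ['bgp', 'rip', 'static', 'connected']
        if protocol not in protocols:
            raise ValueError('redistributed protocol must be '
                             'bgp, connected, rip or static')
        if route_map_name is None:
            cmd = 'redistribute {}'.format(protocol)
        else:
            cmd = 'redistribute {} route-map {}'.format(protocol, route_map_name)
        return self.configure_ospf(cmd)

OspfStub().add_redistribution('bgp', 'RM-OUT')
# => sending: redistribute bgp route-map RM-OUT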
def reorder(miz_file_path: typing.Union[str, Path],
target_dir: typing.Union[str, Path],
skip_options_file: bool,
):
miz_file_path = Path(miz_file_path).absolute()
if not miz_file_path.exists():
raise FileNotFoundError(miz_file_path)
if not miz_file_path.is_file():
raise ValueError(f'not a file: {miz_file_path}')
target_dir_path = Path(target_dir).absolute()
if not target_dir_path.exists():
target_dir_path.mkdir(parents=True)
else:
if not target_dir_path.is_dir():
raise ValueError(f'not a directory: {target_dir_path}')
LOGGER.debug('re-ordering miz file: %s', miz_file_path)
LOGGER.debug('destination folder: %s', target_dir)
LOGGER.debug('%s option file', "skipping" if skip_options_file else "including")
if not target_dir_path.exists():
LOGGER.debug('creating directory %s', target_dir_path)
target_dir_path.mkdir(exist_ok=True)
Miz._do_reorder(miz_file_path, skip_options_file, target_dir_path)
|
Re-orders a miz file into a folder (flattened)
Args:
miz_file_path: source miz file
target_dir: folder to flatten the content into
skip_options_file: do not re-order option file
|
juraj-google-style
|
def get_interpolated_value(self, energy, integrated=False):
inter = {}
for spin in self.cohp:
if not integrated:
inter[spin] = get_linear_interpolated_value(self.energies,
self.cohp[spin],
energy)
elif self.icohp is not None:
inter[spin] = get_linear_interpolated_value(self.energies,
self.icohp[spin],
energy)
else:
raise ValueError("ICOHP is empty.")
return inter
|
Returns the COHP for a particular energy.
Args:
energy: Energy to return the COHP value for.
integrated: Return the integrated COHP (ICOHP) instead of the COHP if True.
|
juraj-google-style
|
def report_file(config, auth, report_id=None, name=None, timeout=60, chunksize=DBM_CHUNKSIZE):
storage_path = report_fetch(config, auth, report_id, name, timeout)
if storage_path == False:
return (None, None)
elif storage_path == True:
return ('report_running.csv', None)
else:
filename = RE_FILENAME.search(storage_path).groups(0)[0]
if chunksize:
if config.verbose:
print('REPORT FILE STREAM:', storage_path)
return (filename, response_utf8_stream(urlopen(storage_path), chunksize))
else:
if config.verbose:
print('REPORT FILE SINGLE:', storage_path)
return (filename, urlopen(storage_path).read().decode('UTF-8'))
|
Retrieves the most recent DBM file by name or ID; if the report is in progress, waits for it to complete.
Timeout is in minutes (retries happen at 1 minute intervals; default
total time is 60 minutes).
If chunksize is set to None then the whole file is downloaded at once.
Args:
* auth: (string) Either user or service.
* report_id: (int) ID of DBM report to fetch (either this or name).
* name: (string) Name of report to fetch (either this or report_id).
* timeout: (int) Minutes to wait for in progress report before giving up.
* chunksize: (int) number of bytes to download at a time, for memory
constrained systems.
Returns:
* (filename, iterator) if file exists and is ready to download in chunks.
* (filename, file) if file exists and chunking is off.
* ('report_running.csv', None) if report is in progress.
* (None, None) if file does not exist.
|
github-repos
|
def market_exact(self, session, start_time: str, end_time: str) -> Session:
if session not in self.exch: return SessNA
ss = self.exch[session]
same_day = ss[0] < ss[-1]
if not start_time: s_time = ss[0]
else:
s_time = param.to_hour(start_time)
if same_day: s_time = max(s_time, ss[0])
if not end_time: e_time = ss[-1]
else:
e_time = param.to_hour(end_time)
if same_day: e_time = min(e_time, ss[-1])
if same_day and (s_time > e_time): return SessNA
return Session(start_time=s_time, end_time=e_time)
|
Explicitly specify start time and end time
Args:
session: predefined session
start_time: start time in terms of HHMM string
end_time: end time in terms of HHMM string
Returns:
Session of start_time and end_time
|
juraj-google-style
|
def _GetFileByPath(self, key_path_upper):
(key_path_prefix, registry_file) = self._GetCachedFileByPath(key_path_upper)
if (not registry_file):
for mapping in self._GetFileMappingsByPath(key_path_upper):
try:
registry_file = self._OpenFile(mapping.windows_path)
except IOError:
registry_file = None
if (not registry_file):
continue
if (not key_path_prefix):
key_path_prefix = mapping.key_path_prefix
self.MapFile(key_path_prefix, registry_file)
key_path_prefix = key_path_prefix.upper()
break
return (key_path_prefix, registry_file)
|
Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consisting of:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
|
codesearchnet
|
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
return self._get_dataloader(dataset=test_dataset, description='test', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler)
|
Returns the test [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (`torch.utils.data.Dataset`, *optional*):
The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. It must implement `__len__`.
|
github-repos
|
def create_nanopubs_fh(output_fn: str):
(json_flag, jsonl_flag, yaml_flag) = (False, False, False)
if output_fn:
if re.search('gz$', output_fn):
out_fh = gzip.open(output_fn, 'wt')
else:
out_fh = click.open_file(output_fn, mode='wt')
if re.search('ya?ml', output_fn):
yaml_flag = True
elif (('jsonl' in output_fn) or ('-' == output_fn)):
jsonl_flag = True
elif ('json' in output_fn):
json_flag = True
else:
out_fh = sys.stdout
return (out_fh, yaml_flag, jsonl_flag, json_flag)
|
Create Nanopubs output filehandle
\b
If output fn is '-', will write JSONLines to STDOUT
If output fn has *.gz, will be written as a gzip file
If output fn has *.jsonl*, will be written as a JSONLines file
If output fn has *.json*, will be written as a JSON file
If output fn has *.yaml* or *.yml*, will be written as a YAML file
Args:
output_fn: Name of output file
Returns:
(filehandle, yaml_flag, jsonl_flag, json_flag)
|
codesearchnet
|
def add_info_field(self, field):
if field in self.info_dict:
msg = "New info field [{}] already exists.".format(field)
raise KeyError(msg)
if "=" in field:
key, value = field.split("=")
self.info_dict[key] = value
else:
self.info_dict[field] = field
self._join_info_fields()
|
Adds new info field (flag or key=value pair).
Args:
field: String flag (e.g. "SOMATIC") or key-value ("NEW_DP=42")
Raises:
KeyError: if info field already exists
|
juraj-google-style
|
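A minimal sketch of the record object that add_info_field above operates on; VcfRecord, its info attribute, and _join_info_fields are illustrative assumptions about the surrounding class.
class VcfRecord:
    def __init__(self, info):
        self.info_dict = dict(
            item.split('=', 1) if '=' in item else (item, item)
            for item in info.split(';')
        )
        self._join_info_fields()

    def _join_info_fields(self):
        # rebuild the raw INFO string from the dict
        self.info = ';'.join(
            key if key == value else '{}={}'.format(key, value)
            for key, value in self.info_dict.items()
        )

    def add_info_field(self, field):
        if field in self.info_dict:
            raise KeyError('New info field [{}] already exists.'.format(field))
        if '=' in field:
            key, value = field.split('=')
            self.info_dict[key] = value
        else:
            self.info_dict[field] = field
        self._join_info_fields()

record = VcfRecord('DP=10;SOMATIC')
record.add_info_field('NEW_DP=42')
print(record.info)  # => DP=10;SOMATIC;NEW_DP=42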