code | docstring | source |
---|---|---|
def word_matches(s1, s2, n=3):
return __matches(s1, s2, word_ngrams, n=n)
|
Word-level n-grams that match between two strings
Args:
s1: a string
s2: another string
n: an int for the n in n-gram
Returns:
set: the n-grams found in both strings
|
juraj-google-style
|
def all_tokens(self, delimiter=' ', label_list_ids=None):
tokens = set()
for label_list in self.label_lists.values():
if label_list_ids is None or label_list.idx in label_list_ids:
tokens = tokens.union(label_list.all_tokens(delimiter=delimiter))
return tokens
|
Return a set of all tokens occurring in
one of the labels in the label-lists.
Args:
delimiter (str): The delimiter used to split labels into tokens
(see :meth:`audiomate.annotations.Label.tokenized`).
label_list_ids (list): If not None, only labels from label-lists with
an idx contained in this list are considered.
Returns:
:class:`set`: A set of distinct tokens.
|
juraj-google-style
|
def add_node(self, binary_descriptor):
try:
node_string = parse_binary_descriptor(binary_descriptor)
except:
self._logger.exception('Error parsing binary node descriptor: %s', binary_descriptor)
return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM)
try:
self.graph.add_node(node_string)
except NodeConnectionError:
return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)
except ProcessingFunctionError:
return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)
except ResourceUsageError:
return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)
return Error.NO_ERROR
|
Add a node to the sensor_graph using a binary node descriptor.
Args:
binary_descriptor (bytes): An encoded binary node descriptor.
Returns:
int: A packed error code.
|
codesearchnet
|
def splitGenoSlidingWindow(pos,out_file,size=5e4,step=None):
if step is None: step = 0.5*size
chroms = SP.unique(pos[:,0])
RV = []
wnd_i = 0
wnd_file = csv.writer(open(out_file,'w'),delimiter='\t')
nSnps = []
for chrom_i in chroms:
Ichrom = pos[:,0]==chrom_i
idx_chrom_start = SP.where(Ichrom)[0][0]
pos_chr = pos[Ichrom,1]
start = pos_chr.min()
pos_chr_max = pos_chr.max()
while 1:
if start>pos_chr_max: break
end = start+size
Ir = (pos_chr>=start)*(pos_chr<end)
_nSnps = Ir.sum()
if _nSnps>0:
idx_wnd_start = idx_chrom_start+SP.where(Ir)[0][0]
nSnps.append(_nSnps)
line = SP.array([wnd_i,chrom_i,start,end,idx_wnd_start,_nSnps],dtype=int)
wnd_file.writerow(line)
wnd_i+=1
start += step
nSnps = SP.array(nSnps)
return wnd_i,nSnps
|
Split into windows using a sliding-window criterion
Args:
size: window size
step: moving step (default: 0.5*size)
Returns:
wnd_i: number of windows
nSnps: vector of per-window number of SNPs
|
juraj-google-style
|
def _create_and_save_state(cls, mapreduce_spec, _app):
state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = 0
if _app:
state.app_id = _app
config = util.create_datastore_write_config(mapreduce_spec)
state.put(config=config)
return state
|
Save mapreduce state to datastore.
Save state to datastore so that UI can see it immediately.
Args:
mapreduce_spec: model.MapreduceSpec,
_app: app id if specified. None otherwise.
Returns:
The saved Mapreduce state.
|
codesearchnet
|
def get(self, username=None, password=None, headers={}):
if all((username, password)):
return BasicAuth(username, password, headers)
elif (not any((username, password))):
return AnonymousAuth(headers)
else:
if (username is None):
data = ('username', username)
else:
data = ('Password', password)
msg = ("%s must have a value (instead of '%s')" % (data[0], data[1]))
raise ValueError(msg)
|
Factory method to get the correct AuthInfo object.
The returned value depends on the arguments given. In case the
username and password don't have a value (i.e. evaluate to False),
return an object for anonymous access. Else, return an auth
object that supports basic authentication.
Args:
`username`: The username of the user.
`password`: The password of the user.
`headers`: Custom headers to be sent to each request.
Raises:
ValueError: in case only one of the two arguments evaluates to False
(such as having the value None).
|
codesearchnet
|
def setup(config_root=''):
config = _load_config(root=config_root)
logging_config = config.get('core', {}).get('logging', {})
log_level = logging_config.get('level', 'INFO').upper()
log_handlers = logging_config.get('handlers') or ['syslog']
ulogger.setup_logging(
progname='gordon-janitor', level=log_level, handlers=log_handlers)
return config
|
Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration
|
juraj-google-style
|
def spec_like(self, tree: Tree[Array], *, ignore_other: bool=True) -> Tree[enp.ArraySpec]:
def _to_spec_array(array):
if not enp.ArraySpec.is_array(array):
if ignore_other:
return array
else:
raise TypeError(f'Unknown array type: {type(array)}')
else:
return enp.ArraySpec.from_array(array)
return self.backend.map(_to_spec_array, tree)
|
Inspect a tree of arrays; works with any array type.
Example:
```python
model = MyModel()
variables = model.init(jax.random.PRNGKey(0), x)
# Inspect the `variables` tree structures
print(etree.spec_like(variables))
```
Args:
tree: The tree of arrays
ignore_other: If `True`, non-array leaves are forwarded as-is.
Returns:
The tree of `enp.ArraySpec`.
|
github-repos
|
def date_added(self, date_added):
date_added = self._utils.format_datetime(date_added, date_format='%Y-%m-%dT%H:%M:%SZ')
self._data['dateAdded'] = date_added
request = self._base_request
request['dateAdded'] = date_added
return self._tc_requests.update(request, owner=self.owner)
|
Updates the security labels date_added
Args:
date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format
|
juraj-google-style
|
def _restore_path(table):
name = None
splited = table.split('___')
path = splited[0]
if (len(splited) == 2):
name = splited[1]
path = path.replace('__', os.path.sep)
path += '.csv'
return (path, name)
|
Restore resource's path and name from storage's table.
Args:
table (str): table name
Returns:
(str, str): resource path and name
|
codesearchnet
|
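A quick worked example may help clarify the `__`/`___` encoding that `_restore_path` above reverses; this is a usage sketch with made-up table names, assuming the function and `os` are in scope:

```python
# '__' encodes path separators, '___' precedes an optional resource name.
print(_restore_path('data__raw__measurements___sensor1'))
# -> ('data/raw/measurements.csv', 'sensor1') on POSIX (os.path.sep == '/')
print(_restore_path('data__raw__measurements'))
# -> ('data/raw/measurements.csv', None)
```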
def HelpText(component, trace=None, verbose=False):
info = inspectutils.Info(component)
actions_grouped_by_kind = _GetActionsGroupedByKind(component, verbose=verbose)
spec = inspectutils.GetFullArgSpec(component)
metadata = decorators.GetMetadata(component)
name_section = _NameSection(component, info, trace=trace, verbose=verbose)
synopsis_section = _SynopsisSection(component, actions_grouped_by_kind, spec, metadata, trace=trace)
description_section = _DescriptionSection(component, info)
if callable(component):
args_and_flags_sections, notes_sections = _ArgsAndFlagsSections(info, spec, metadata)
else:
args_and_flags_sections = []
notes_sections = []
usage_details_sections = _UsageDetailsSections(component, actions_grouped_by_kind)
sections = [name_section, synopsis_section, description_section] + args_and_flags_sections + usage_details_sections + notes_sections
return '\n\n'.join((_CreateOutputSection(*section) for section in sections if section is not None))
|
Gets the help string for the current component, suitable for a help screen.
Args:
component: The component to construct the help string for.
trace: The Fire trace of the command so far. The command executed so far
can be extracted from this trace.
verbose: Whether to include private members in the help screen.
Returns:
The full help screen as a string.
|
github-repos
|
def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:
cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD']
(con, _) = create_connection()
hours = con.ref(tickers=tickers, flds=cols)
cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ')
hours.loc[:, 'local'] = hours.value.astype(str).str[:(- 3)]
hours.loc[:, 'exch'] = pd.DatetimeIndex((cur_dt + hours.value.astype(str))).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M')
hours = pd.concat([hours.set_index(['ticker', 'field']).exch.unstack().loc[:, cols], hours.set_index(['ticker', 'field']).local.unstack().loc[:, cols]], axis=1)
hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End']
return hours
|
Check exchange hours vs local hours
Args:
tickers: list of tickers
tz_exch: exchange timezone
tz_loc: local timezone
Returns:
Local and exchange hours
|
codesearchnet
|
def l1_l2(l1=0.01, l2=0.01):
return L1L2(l1=l1, l2=l2)
|
Create a regularizer that applies both L1 and L2 penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
Args:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
Returns:
An L1L2 Regularizer with the given regularization factors.
|
github-repos
|
def reshape_by_blocks(x, x_shape, memory_block_size):
x = tf.reshape(x, [x_shape[0], x_shape[1], (x_shape[2] // memory_block_size), memory_block_size, x_shape[3]])
return x
|
Reshapes input by splitting its length over blocks of memory_block_size.
Args:
x: a Tensor with shape [batch, heads, length, depth]
x_shape: tf.TensorShape of x.
memory_block_size: Integer which divides length.
Returns:
Tensor with shape
[batch, heads, length // memory_block_size, memory_block_size, depth].
|
codesearchnet
|
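A shape-only sketch of `reshape_by_blocks` above, assuming the tensor2tensor/TF1 graph context the snippet comes from (`tf` and `common_layers.shape_list` are taken from that environment):

```python
import tensorflow as tf
from tensor2tensor.layers import common_layers  # assumed dependency of the snippet

x = tf.zeros([2, 4, 128, 64])  # [batch, heads, length, depth]
x_blocked = reshape_by_blocks(x, common_layers.shape_list(x), memory_block_size=32)
# x_blocked has shape [2, 4, 4, 32, 64]: length 128 split into 4 blocks of 32.
```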
def __init__(self, session, proxy_class):
assert isinstance(proxy_class, type)
self.session = session
self.proxy_class = proxy_class
|
Instantiate an API Authentication Proxy.
Args:
session (requests.Session): Authenticated requests Session.
proxy_class (type): A class implementing the ``BaseApi``
interface.
|
juraj-google-style
|
def get(cls, blob_key, **ctx_options):
fut = cls.get_async(blob_key, **ctx_options)
return fut.get_result()
|
Retrieve a BlobInfo by key.
Args:
blob_key: A blob key. This may be a str, unicode or BlobKey instance.
**ctx_options: Context options for Model().get_by_id().
Returns:
A BlobInfo entity associated with the provided key. If there was
no such entity, returns None.
|
codesearchnet
|
def _set_value(self, slot_record):
if (slot_record.status == _SlotRecord.FILLED):
self.filled = True
self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(slot_record)
self._fill_datetime = slot_record.fill_time
self._value = slot_record.value
|
Sets the value of this slot based on its corresponding _SlotRecord.
Does nothing if the slot has not yet been filled.
Args:
slot_record: The _SlotRecord containing this Slot's value.
|
codesearchnet
|
def block_view(self, mri):
controller = self.get_controller(mri)
block = controller.block_view(weakref.proxy(self))
return block
|
Get a view of a block
Args:
mri: The mri of the controller hosting the block
Returns:
Block: The block we control
|
juraj-google-style
|
def _parse_parameters(val_type, val):
if (val_type == 'logical'):
return (val == 'T')
elif (val_type == 'int'):
return int(val)
elif (val_type == 'string'):
return val.strip()
else:
return float(val)
|
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
|
codesearchnet
|
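For illustration, a few hypothetical calls to `_parse_parameters` above, assuming the function is in scope, showing the type coercion it performs:

```python
print(_parse_parameters('logical', 'T'))      # True
print(_parse_parameters('int', ' 42 '))       # 42
print(_parse_parameters('string', '  PBE '))  # 'PBE'
print(_parse_parameters('float', '1.0e-3'))   # 0.001
```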
def GetAutomountMapMetadata(self, conf, epoch=False):
map_name = config.MAP_AUTOMOUNT
cache_options = conf.options[map_name].cache
value_list = []
values = self.GetSingleMapMetadata(map_name, conf, automount_mountpoint=None, epoch=epoch)
value_list.extend(values)
cache = cache_factory.Create(cache_options, config.MAP_AUTOMOUNT, automount_mountpoint=None)
master_map = cache.GetMap()
for map_entry in master_map:
values = self.GetSingleMapMetadata(map_name, conf, automount_mountpoint=map_entry.key, epoch=epoch)
value_list.extend(values)
return value_list
|
Return status of automount master map and all listed automount maps.
We retrieve the automount master map, and build a list of dicts which
are used by the caller to print the status output.
Args:
conf: a config.Config object
epoch: return times as an integer epoch (time_t) instead of a
human readable name
Returns:
a list of dicts of metadata key/value pairs
|
github-repos
|
def __init__(self, namespace=None):
assert namespace != DEFAULT_REQUEST_CACHE_NAMESPACE,\
'Optional namespace can not be {}.'.format(DEFAULT_REQUEST_CACHE_NAMESPACE)
self.namespace = namespace or DEFAULT_REQUEST_CACHE_NAMESPACE
|
Creates a request cache with the provided namespace.
Args:
namespace (string): (optional) uses 'default' if not provided.
|
juraj-google-style
|
def _get_local_folder(self, root=None):
if root is None:
root = Path()
for folders in ['.'], [self.user, self.napp]:
kytos_json = root / Path(*folders) / 'kytos.json'
if kytos_json.exists():
with kytos_json.open() as file_descriptor:
meta = json.load(file_descriptor)
username = meta.get('username', meta.get('author'))
if username == self.user and meta.get('name') == self.napp:
return kytos_json.parent
raise FileNotFoundError('kytos.json not found.')
|
Return local NApp root folder.
Search for kytos.json in _./_ folder and _./user/napp_.
Args:
root (pathlib.Path): Where to begin searching.
Return:
pathlib.Path: NApp root folder.
Raises:
FileNotFoundError: If there is no such local NApp.
|
juraj-google-style
|
def _shape_union(shapes):
return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
|
A shape containing the union of all dimensions in the input shapes.
Args:
shapes: a list of Shapes
Returns:
a Shape
|
juraj-google-style
|
def get_conversion_factor(self, new_unit):
(uo_base, ofactor) = self.as_base_units
(un_base, nfactor) = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(), key=(lambda d: _UNAME2UTYPE[d[0]]))
units_old = sorted(uo_base.items(), key=(lambda d: _UNAME2UTYPE[d[0]]))
factor = (ofactor / nfactor)
for (uo, un) in zip(units_old, units_new):
if (uo[1] != un[1]):
raise UnitError(('Units %s and %s are not compatible!' % (uo, un)))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= ((c[uo[0]] / c[un[0]]) ** uo[1])
return factor
|
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
|
codesearchnet
|
def convert(self):
graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)
if self.saved_model_dir is None or not self.experimental_new_converter:
graph_def, _, _, _ = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
self.saved_model_dir = None
return super(TFLiteSavedModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)
trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
if trackable_obj is None:
self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)
else:
self._debug_info = _get_debug_info(_convert_debug_info_func(trackable_obj.graph_debug_info), graph_def)
del trackable_obj
gc.collect()
return self._convert_from_saved_model(graph_def)
|
Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
|
github-repos
|
def squeeze(name, x, factor=2, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
shape = common_layers.shape_list(x)
if factor == 1:
return x
height = int(shape[1])
width = int(shape[2])
n_channels = int(shape[3])
if not reverse:
assert height % factor == 0 and width % factor == 0
x = tf.reshape(x, [-1, height // factor, factor,
width // factor, factor, n_channels])
x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
x = tf.reshape(x, [-1, height // factor, width //
factor, n_channels*factor*factor])
else:
x = tf.reshape(
x, (-1, height, width, int(n_channels/factor**2), factor, factor))
x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
x = tf.reshape(x, (-1, int(height*factor),
int(width*factor), int(n_channels/factor**2)))
return x
|
Block-wise spatial squeezing of x to increase the number of channels.
Args:
name: Used for variable scoping.
x: 4-D Tensor of shape (batch_size X H X W X C)
factor: Factor by which the spatial dimensions should be squeezed.
reverse: Squeeze or unsqueeze operation.
Returns:
x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X
(C X factor^2)). If reverse is True, then it is factor = (1 / factor)
|
juraj-google-style
|
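A shape sketch for `squeeze` above; it assumes the TF1/tensor2tensor environment the snippet targets (variable scopes, `common_layers`), so it is illustrative rather than a drop-in test:

```python
import tensorflow as tf

x = tf.zeros([8, 32, 32, 3])                                 # (B, H, W, C)
y = squeeze('squeeze_demo', x, factor=2, reverse=False)      # (8, 16, 16, 12)
x_back = squeeze('squeeze_demo', y, factor=2, reverse=True)  # (8, 32, 32, 3)
```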
def stage_tc_create_attribute(self, attribute_type, attribute_value, resource):
attribute_data = {'type': str(attribute_type), 'value': str(attribute_value)}
if attribute_type in ['Description', 'Source']:
attribute_data['displayed'] = True
attrib_resource = resource.attributes()
attrib_resource.body = json.dumps(attribute_data)
attrib_resource.http_method = 'POST'
a_response = attrib_resource.request()
if a_response.get('status') != 'Success':
self.log.warning(
'[stage] Failed adding attribute type "{}":"{}" ({}).'.format(
attribute_type, attribute_value, a_response.get('response').text
)
)
|
Add an attribute to a resource.
Args:
attribute_type (str): The attribute type (e.g., Description).
attribute_value (str): The attribute value.
resource (obj): An instance of tcex resource class.
|
juraj-google-style
|
def removedirs(self, target_directory):
target_directory = self.filesystem.absnormpath(target_directory)
directory = self.filesystem.confirmdir(target_directory)
if directory.contents:
self.filesystem.raise_os_error(
errno.ENOTEMPTY, self.path.basename(target_directory))
else:
self.rmdir(target_directory)
head, tail = self.path.split(target_directory)
if not tail:
head, tail = self.path.split(head)
while head and tail:
head_dir = self.filesystem.confirmdir(head)
if head_dir.contents:
break
self.filesystem.rmdir(head, allow_symlink=True)
head, tail = self.path.split(head)
|
Remove a leaf fake directory and all empty intermediate ones.
Args:
target_directory: the directory to be removed.
Raises:
OSError: if target_directory does not exist or is not a directory.
OSError: if target_directory is not empty.
|
juraj-google-style
|
def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs):
if python_type is None:
python_type = lookup_type(storable_type)
if self.verbose:
print('generating storable instance for type: {}'.format(python_type))
self.storables.registerStorable(default_storable(python_type, \
version=version, storable_type=storable_type), **kwargs)
return self.byPythonType(python_type, True).asVersion(version)
|
Generate a default storable instance.
Arguments:
python_type (type): Python type of the object.
storable_type (str): storable type name.
version (tuple): version number of the storable handler.
Returns:
StorableHandler: storable instance.
Extra keyword arguments are passed to :meth:`registerStorable`.
|
juraj-google-style
|
def validate(self, data):
try:
self._validator.validate(data)
except jsonschema.ValidationError as e:
six.raise_from(ValidationError.create_from(e), e)
|
Validates a data dict against this schema.
Args:
data (dict): The data to be validated.
Raises:
ValidationError: If the data is invalid.
|
juraj-google-style
|
def PrepareForExport(module_name, ast, loader):
src = pytd_utils.Print(ast)
return SourceToExportableAst(module_name, src, loader)
|
Prepare an ast as if it was parsed and loaded.
External dependencies will not be resolved, as the ast generated by this
method is supposed to be exported.
Args:
module_name: The module_name as a string for the returned ast.
ast: pytd.TypeDeclUnit, is only used if src is None.
loader: A load_pytd.Loader instance.
Returns:
A pytd.TypeDeclUnit representing the supplied AST as it would look after
being written to a file and parsed.
|
github-repos
|
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(PollRequestPayload, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE, local_stream):
self._asynchronous_correlation_value = primitives.ByteString(tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE)
self._asynchronous_correlation_value.read(local_stream, kmip_version=kmip_version)
self.is_oversized(local_stream)
|
Read the data encoding the Poll request payload and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
|
codesearchnet
|
def write(self, path=None, *args, **kwargs):
if (path is None):
print(self.format(*args, **kwargs))
else:
with io.open(path, 'w', newline='') as f:
f.write(self.format(*args, **kwargs))
|
Perform formatting and write the formatted string to a file or stdout.
Optional arguments can be used to format the editor's contents. If no
file path is given, prints to standard output.
Args:
path (str): Full file path (default None, prints to stdout)
*args: Positional arguments to format the editor with
**kwargs: Keyword arguments to format the editor with
|
codesearchnet
|
def enable(self, timeout=0):
self.client.api.enable_plugin(self.name, timeout)
self.reload()
|
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def __convertLongToString(self, iValue):
string = ''
strValue = str(hex(iValue))
string = strValue.lstrip('0x')
string = string.rstrip('L')
return string
|
Convert a long hex integer to a string,
removing the '0x' prefix and trailing 'L'.
Args:
iValue: long integer in hex format
Returns:
string of this long integer without "0x" and "L"
|
codesearchnet
|
def convert_datetime_array(array):
if (not isinstance(array, np.ndarray)):
return array
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') == dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if (e.args == ("'module' object has no attribute 'datetime64'",)):
if ('PyPy' in sys.version):
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
if (array.dtype.kind == 'M'):
if legacy_datetime64:
if (array.dtype == np.dtype('datetime64[ns]')):
array = (array.astype('int64') / (10 ** 6.0))
else:
array = (array.astype('datetime64[us]').astype('int64') / 1000.0)
elif (array.dtype.kind == 'm'):
array = (array.astype('timedelta64[us]').astype('int64') / 1000.0)
return array
|
Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array : (obj)
A NumPy array of datetime to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
|
codesearchnet
|
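A small usage sketch of `convert_datetime_array` above, assuming the function is importable (2001-01-01 is 978307200000 ms after the epoch):

```python
import numpy as np

arr = np.array(['2001-01-01T00:00:00'], dtype='datetime64[ns]')
print(convert_datetime_array(arr))               # [9.783072e+11]  milliseconds since epoch
print(convert_datetime_array('not an ndarray'))  # non-ndarray input is returned unchanged
```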
def markdown_compatible(text: str) -> str:
text = re.sub('^\\(([\\d.]+[a-zA-Z]?)\\) \\\\\\[(.+?)\\\\\\]$', '\\[\\2 \\\\tag{\\1}\\]', text, flags=re.M)
text = re.sub('^\\\\\\[(.+?)\\\\\\] \\(([\\d.]+[a-zA-Z]?)\\)$', '\\[\\1 \\\\tag{\\2}\\]', text, flags=re.M)
text = re.sub('^\\\\\\[(.+?)\\\\\\] \\(([\\d.]+[a-zA-Z]?)\\) (\\\\\\[.+?\\\\\\])$', '\\[\\1 \\\\tag{\\2}\\] \\3', text, flags=re.M)
text = text.replace('\\. ', '. ')
text = text.replace('\\bm{', '\\mathbf{').replace('{\\\\bm ', '\\mathbf{')
text = re.sub('\\\\mbox{ ?\\\\boldmath\\$(.*?)\\$}', '\\\\mathbf{\\1}', text)
text = re.sub('((?:http|ftp|https):\\/\\/(?:[\\w_-]+(?:(?:\\.[\\w_-]+)+))(?:[\\w.,@?^=%&:\\/~+#-]*[\\w@?^=%&\\/~+#-]))', '[\\1](\\1)', text)
text = re.sub('```\\s*(.+?)\\s*```', '```\\n\\1\\n```', text, flags=re.S)
return text
|
Make text compatible with Markdown formatting.
This function makes various text formatting adjustments to make it compatible with Markdown.
Args:
text (`str`):
The input text to be made Markdown-compatible.
Returns:
`str`: The Markdown-compatible text.
|
github-repos
|
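One illustrative before/after for the equation-tag rewriting done by `markdown_compatible` above, using a made-up input string:

```python
src = '(1.2) \\[E = mc^{2}\\]'
print(markdown_compatible(src))
# \[E = mc^{2} \tag{1.2}\]
```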
def create_metadata(self, resource, keys_vals):
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.create(resource, keys_vals)
|
Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure.
|
codesearchnet
|
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
size = get_size_dict(size)
shortest_edge = min(size['height'], size['width'])
output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format)
resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
return resized_image
|
Resizes `image` to `(height, width)` specified by `size` using the PIL library.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
|
github-repos
|
def _column_name_with_class_name(fc):
return fc.__class__.__name__ + ':' + fc.name
|
Returns a unique name for the feature column used during deduping.
Without this two FeatureColumns that have the same name and where
one wraps the other, such as an IndicatorColumn wrapping a
SequenceCategoricalColumn, will fail to deserialize because they will have the
same name in columns_by_name, causing the wrong column to be returned.
Args:
fc: A FeatureColumn.
Returns:
A unique name as a string.
|
github-repos
|
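A hedged illustration of `_column_name_with_class_name` above, using the (now deprecated) `tf.feature_column` API these columns come from:

```python
import tensorflow as tf

fc = tf.feature_column.numeric_column('price')
print(_column_name_with_class_name(fc))  # 'NumericColumn:price'
```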
def try_listify_dict_with_int_keys(src: Dict[Any, Any], convert_when_sparse: bool=False) -> Tuple[Union[List[Any], Dict[Any, Any]], bool]:
if not src:
return (src, False)
min_key = None
max_key = None
for key in src.keys():
if not isinstance(key, int):
return (src, False)
if min_key is None or min_key > key:
min_key = key
if max_key is None or max_key < key:
max_key = key
if convert_when_sparse or (min_key == 0 and max_key == len(src) - 1):
return ([src[key] for key in sorted(src.keys())], True)
return (src, False)
|
Try to convert a dictionary with consecutive integer keys to a list.
Args:
src: A dict whose keys must all be ints and, unless convert_when_sparse
is set to True, must form a perfect range(0, N).
convert_when_sparse: When src is an int-keyed dict, force conversion to
a list ordered by key, even if the keys are sparse.
Returns:
converted list or src unchanged.
|
github-repos
|
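A few hypothetical inputs for `try_listify_dict_with_int_keys` above, showing the dense, sparse, and forced-conversion cases (assumes the function is in scope):

```python
print(try_listify_dict_with_int_keys({0: 'a', 1: 'b', 2: 'c'}))
# (['a', 'b', 'c'], True)            keys form range(0, 3)
print(try_listify_dict_with_int_keys({0: 'a', 5: 'b'}))
# ({0: 'a', 5: 'b'}, False)          sparse keys, returned unchanged
print(try_listify_dict_with_int_keys({0: 'a', 5: 'b'}, convert_when_sparse=True))
# (['a', 'b'], True)                 forced conversion, ordered by key
```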
def create_mapping(record, keys):
ordered = OrderedDict()
field_mappings = []
for (key, value) in record.items():
ordered[key] = value
field_mappings.append({'columnNumber': len(ordered), 'fieldName': key, 'key': (key in keys)})
return {'field_mappings': field_mappings, 'data': ordered, 'fields': list(ordered.values())}
|
Create a field mapping for use in API updates and creates.
Args:
record (BaseModel): Record that should be mapped.
keys (list[str]): Fields that should be mapped as keys.
Returns:
dict: Dictionary with keys:
* ``field_mappings``: Field mappings as required by API.
* ``data``: Ordered data dictionary for input record.
|
codesearchnet
|
def legacy_raw_flush(writer=None, name=None):
if writer is None or isinstance(writer, SummaryWriter):
return flush(writer, name)
else:
with ops.device('cpu:0'):
return gen_summary_ops.flush_summary_writer(writer, name=name)
|
Legacy version of flush() that accepts a raw resource tensor for `writer`.
Do not use this function in any new code. Not supported and not part of the
public TF APIs.
Args:
writer: The `tf.summary.SummaryWriter` to flush. If None, the current
default writer will be used instead; if there is no current writer, this
returns `tf.no_op`. For this legacy version only, also accepts a raw
resource tensor pointing to the underlying C++ writer resource.
name: Ignored legacy argument for a name for the operation.
Returns:
The created `tf.Operation`.
|
github-repos
|
def _merge_run_options(self, options, incoming_options):
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms, incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool, incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(options.output_partition_graphs, incoming_options.output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(incoming_options.debug_options.debug_tensor_watch_opts)
options.debug_options.reset_disk_byte_usage = options.debug_options.reset_disk_byte_usage or incoming_options.debug_options.reset_disk_byte_usage
options.report_tensor_allocations_upon_oom = options.report_tensor_allocations_upon_oom or incoming_options.report_tensor_allocations_upon_oom
|
Merge two instances of RunOptions into the first one.
During the merger, the numerical fields including trace_level,
timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.
The boolean value is set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
|
github-repos
|
def GetPrototype(self, descriptor):
if (descriptor.full_name not in self._classes):
descriptor_name = descriptor.name
if (str is bytes):
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(descriptor_name, (message.Message,), {'DESCRIPTOR': descriptor, '__module__': None})
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if (extension.containing_type.full_name not in self._classes):
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return self._classes[descriptor.full_name]
|
Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
|
codesearchnet
|
def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):
assert method in ('equal', 'cosine', 'random'), 'Invalid method'
wind_eq = melodist.distribute_equally(wind_daily)
if method == 'equal':
wind_disagg = wind_eq
elif method == 'cosine':
assert None not in (a, b, t_shift)
wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)
elif method == 'random':
wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3
return wind_disagg
|
General function for wind speed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed.
|
juraj-google-style
|
def expand_batch_coordinates(bc, length_factor):
assert (bc.get_shape().as_list() == [1, None, 1])
bc *= tf.constant([([1] * length_factor)])
bc = tf.reshape(bc, [1, (- 1), 1])
return bc
|
Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
been duplicated length_factor times.
|
codesearchnet
|
def __init__(self, logger, script_type):
self.logger = logger
self.script_type = script_type
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
|
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the metadata script type to run.
|
juraj-google-style
|
def query_put_bounders(query, partition_column, start, end):
where = ' WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}'.format(partition_column, start, end)
query_with_bounders = 'SELECT * FROM ({0}) AS TMP_TABLE {1}'.format(query, where)
return query_with_bounders
|
Put lower and upper bounds in the query
Args:
query: SQL query string
partition_column: partition_column name
start: lower_bound
end: upper_bound
Returns:
Query with bounds applied
|
codesearchnet
|
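A usage sketch of `query_put_bounders` above with a made-up query and partition column:

```python
q = query_put_bounders('SELECT id, name FROM users', 'id', 100, 200)
print(q)
# SELECT * FROM (SELECT id, name FROM users) AS TMP_TABLE  WHERE TMP_TABLE.id >= 100 AND TMP_TABLE.id <= 200
```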
def goto(self, rules, symbol):
return self.closure(
{rule.move_dot() for rule in rules
if not rule.at_end and rule.rhs[rule.pos] == symbol},
)
|
Computes the next closure for rules based on the symbol we got.
Args:
rules - an iterable of DottedRules
symbol - a string denoting the symbol we've just seen
Returns: frozenset of DottedRules
|
juraj-google-style
|
def Optimize(node, deps=None, lossy=False, use_abcs=False, max_union=7, remove_mutable=False, can_do_lookup=True):
node = node.Visit(NormalizeGenericSelfTypes())
node = node.Visit(RemoveDuplicates())
node = node.Visit(SimplifyUnions())
node = node.Visit(CombineReturnsAndExceptions())
node = node.Visit(CombineContainers())
node = node.Visit(SimplifyContainers())
if deps:
superclasses = deps.Visit(visitors.ExtractSuperClassesByName())
superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))
if use_abcs:
superclasses.update(abc_hierarchy.GetSuperClasses())
hierarchy = SuperClassHierarchy(superclasses)
node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))
if lossy:
node = node.Visit(FindCommonSuperClasses(hierarchy))
if max_union:
node = node.Visit(CollapseLongUnions(max_union))
node = node.Visit(AdjustReturnAndConstantGenericType())
if remove_mutable:
node = node.Visit(AbsorbMutableParameters())
node = node.Visit(CombineContainers())
node = node.Visit(MergeTypeParameters())
node = node.Visit(visitors.AdjustSelf())
node = node.Visit(SimplifyContainers())
if deps and can_do_lookup:
node = visitors.LookupClasses(node, deps, ignore_late_types=True)
return node
|
Optimize a PYTD tree.
Tries to shrink a PYTD tree by applying various optimizations.
Arguments:
node: A pytd node to be optimized. It won't be modified - this function will
return a new node.
deps: Definitions of all of the external types in node.
lossy: Allow optimizations that change the meaning of the pytd.
use_abcs: Use abstract base classes to represent unions like e.g.
"Union[float, int]" as "Real".
max_union: How many types we allow in a union before we simplify it to just
"object".
remove_mutable: Whether to simplify mutable parameters to normal parameters.
can_do_lookup: True: We're either allowed to try to resolve NamedType
instances in the AST, or the AST is already resolved. False: Skip any
optimizations that would require NamedTypes to be resolved.
Returns:
An optimized node.
|
github-repos
|
def hget(self, key):
data = self.r.hget(self.hash, key)
if data is not None and not isinstance(data, str):
data = str(self.r.hget(self.hash, key), 'utf-8')
return data
|
Read data from Redis for the provided key.
Args:
key (string): The key to read in Redis.
Returns:
(any): The response data from Redis.
|
juraj-google-style
|
def _eval_indexed_slices(a):
if isinstance(a, indexed_slices.IndexedSlices) and context.executing_eagerly():
return indexed_slices.IndexedSlicesValue(indices=[x.numpy() for x in a.indices], values=[x.numpy() for x in a.values], dense_shape=a.dense_shape)
return a
|
Converts IndexedSlices to IndexedSlicesValue with numpy indices/values.
When eager execution is enabled, converts IndexedSlices
to IndexedSlicesValue with numpy indices/values.
Args:
a: any value.
Returns:
If a is IndexedSlices and eager execution is enabled, calls numpy() on a's
fields. Otherwise returns a unchanged.
|
github-repos
|
def main(raw_args=None):
if raw_args is None:
raw_args = sys.argv[1:]
parser = build_parser()
args = parser.parse_args(raw_args)
if args.firmware_image is None and args.gdb is None:
print("You must specify either a firmware image or attach a debugger with --gdb <PORT>")
return 1
test_args = ['qemu-system-gnuarmeclipse', '-verbose', '-verbose', '-board', 'STM32F0-Discovery',
'-nographic', '-monitor', 'null', '-serial', 'null', '--semihosting-config',
'enable=on,target=native', '-d', 'unimp,guest_errors']
if args.firmware_image:
test_args += ['-image', args.firmware_image]
if args.gdb:
test_args += ['--gdb', 'tcp::%d' % args.gdb]
proc = subprocess.Popen(test_args, stdout=sys.stdout, stderr=sys.stderr)
try:
proc.communicate()
except KeyboardInterrupt:
proc.terminate()
return 0
|
Run the iotile-emulate script.
Args:
raw_args (list): Optional list of command line arguments. If not
passed these are pulled from sys.argv.
|
juraj-google-style
|
def generator_next_fn(iterator_id_t):
if output_types and output_shapes:
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
def generator_py_func(iterator_id):
values = next(generator_state.get_iterator(iterator_id))
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError) as e:
raise TypeError(f'`generator` yielded an element that did not match the expected structure. The expected structure was {output_types}, but the yielded element was {values}.') from e
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError) as e:
raise TypeError(f'`generator` yielded an element that could not be converted to the expected type. The expected type was {dtype.name}, but the yielded element was {ret}.') from e
for ret_array, expected_dtype, expected_shape in zip(ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(f'`generator` yielded an element of type {ret_array.dtype} where an element of type {expected_dtype.as_numpy_dtype} was expected.')
if not expected_shape.is_compatible_with(ret_array.shape):
raise TypeError(f'`generator` yielded an element of shape {ret_array.shape} where an element of shape {expected_shape} was expected.')
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func, [iterator_id_t], flattened_types)
if not isinstance(flat_values, (list, tuple)):
flat_values = [flat_values]
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
else:
flat_output_types = structure.get_flat_tensor_types(output_signature)
def generator_py_func(iterator_id):
values = next(generator_state.get_iterator(iterator_id.numpy()))
try:
values = structure.normalize_element(values, output_signature)
except (TypeError, ValueError) as e:
raise TypeError(f'`generator` yielded an element that did not match the expected structure. The expected structure was {output_signature}, but the yielded element was {values}.') from e
values_spec = structure.type_spec_from_value(values)
if not structure.are_compatible(values_spec, output_signature):
raise TypeError(f'`generator` yielded an element of {values_spec} where an element of {output_signature} was expected.')
return structure.to_tensor_list(output_signature, values)
return script_ops.eager_py_func(generator_py_func, inp=[iterator_id_t], Tout=flat_output_types)
|
Generates the next element from iterator with ID `iterator_id_t`.
We map this function across an infinite repetition of the
`iterator_id_t`, and raise `StopIteration` to terminate the iteration.
Args:
iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
iterator in `generator_state` from which to generate an element.
Returns:
The next element to generate from the iterator.
|
github-repos
|
def get_nets_jpnic(self, response):
nets = []
for match in re.finditer(
r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
tmp = ip_network(match.group(2))
try:
network_address = tmp.network_address
except AttributeError:
network_address = tmp.ip
pass
try:
broadcast_address = tmp.broadcast_address
except AttributeError:
broadcast_address = tmp.broadcast
pass
net['range'] = '{0} - {1}'.format(
network_address + 1, broadcast_address
)
cidr = ip_network(match.group(2).strip()).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets
|
The function for parsing network blocks from jpnic whois data.
Args:
response (:obj:`str`): The response from the jpnic server.
Returns:
list of dict: Mapping of networks with start and end positions.
::
[{
'cidr' (str) - The network routing block
'start' (int) - The starting point of the network
'end' (int) - The end point of the network
}]
|
juraj-google-style
|
def _convert_observ(self, observ):
if (not np.isfinite(observ).all()):
raise ValueError('Infinite observation encountered.')
if (observ.dtype == np.float64):
return observ.astype(np.float32)
if (observ.dtype == np.int64):
return observ.astype(np.int32)
return observ
|
Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
|
codesearchnet
|
def console_set_alignment(con: tcod.console.Console, alignment: int) -> None:
lib.TCOD_console_set_alignment(_console(con), alignment)
|
Change this consoles current alignment mode.
* tcod.LEFT
* tcod.CENTER
* tcod.RIGHT
Args:
con (Console): Any Console instance.
alignment (int):
.. deprecated:: 8.5
Set :any:`Console.default_alignment` instead.
|
juraj-google-style
|
def get_by_name(self, name):
managed_sans = self.get_all()
result = [x for x in managed_sans if (x['name'] == name)]
resource = (result[0] if result else None)
if resource:
resource = self.new(self._connection, resource)
return resource
|
Gets a Managed SAN by name.
Args:
name: Name of the Managed SAN
Returns:
dict: Managed SAN.
|
codesearchnet
|
def fit_transform(self, X, y=None, **params):
return self.fit(X, y).transform(X, y)
|
Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix.
|
juraj-google-style
|
def _add_qasm_measure(self, qubit, cmembit, cregbit=None):
(outcome, probability) = self._get_measure_outcome(qubit)
membit = (1 << cmembit)
self._classical_memory = ((self._classical_memory & (~ membit)) | (int(outcome) << cmembit))
if (cregbit is not None):
regbit = (1 << cregbit)
self._classical_register = ((self._classical_register & (~ regbit)) | (int(outcome) << cregbit))
if (outcome == '0'):
update_diag = [[(1 / np.sqrt(probability)), 0], [0, 0]]
else:
update_diag = [[0, 0], [0, (1 / np.sqrt(probability))]]
self._add_unitary_single(update_diag, qubit)
|
Apply a measure instruction to a qubit.
Args:
qubit (int): qubit is the qubit measured.
cmembit (int): is the classical memory bit to store outcome in.
cregbit (int, optional): is the classical register bit to store outcome in.
|
codesearchnet
|
def SetName(obj, name):
precondition.AssertType(name, str)
if PY2:
obj.__name__ = name.encode('ascii')
else:
obj.__name__ = name
|
A compatibility wrapper for setting object's name.
See documentation for `GetName` for more information.
Args:
obj: A type or function object to set the name for.
name: A name to set.
|
codesearchnet
|
def distance(cls, q0, q1):
q = Quaternion.log_map(q0, q1)
return q.norm
|
Quaternion intrinsic distance.
Find the intrinsic geodesic distance between q0 and q1.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive amount corresponding to the length of the geodesic arc
connecting q0 to q1.
Note:
Although the q0^(-1)*q1 != q1^(-1)*q0, the length of the path joining
them is given by the logarithm of those product quaternions, the norm
of which is the same.
|
codesearchnet
|
def repack_weights(packed_parameter: torch.Tensor, sharded_dim: int, world_size: int, num_blocks: int=2) -> torch.Tensor:
if num_blocks != 2:
raise ValueError('Num blocks different from 2 is not supported yet. This is most likely a bug in your implementation as we only pack gate and up projections together.')
actual_sharded_dim = sharded_dim if sharded_dim >= 0 else sharded_dim + packed_parameter.ndim
total_size_on_sharded_dim = packed_parameter.shape[actual_sharded_dim]
original_block_size_on_dim = total_size_on_sharded_dim // num_blocks
shard_chunk_size = original_block_size_on_dim // world_size
prefix_shape = packed_parameter.shape[:actual_sharded_dim]
suffix_shape = packed_parameter.shape[actual_sharded_dim + 1:]
tensor_view = packed_parameter.view(*prefix_shape, world_size, num_blocks, shard_chunk_size, *suffix_shape)
axis_ws_abs = len(prefix_shape)
axis_npp_abs = len(prefix_shape) + 1
permute_order = list(range(tensor_view.ndim))
permute_order[axis_ws_abs], permute_order[axis_npp_abs] = (permute_order[axis_npp_abs], permute_order[axis_ws_abs])
tensor_permuted = tensor_view.permute(*permute_order)
final_ordered_tensor = tensor_permuted.reshape_as(packed_parameter)
return final_ordered_tensor
|
Reorders a tensor that was reconstructed from sharded packed weights into its canonical packed format.
For example, if a weight was packed (e.g., gate_proj and up_proj) and then sharded,
DTensor.full_tensor() might produce an interleaved layout like [G0, U0, G1, U1, ...]
along the sharded dimension. This function reorders it to [G0, G1, ..., U0, U1, ...].
This is an inverse operation to get_packed_weights.
Args:
packed_parameter: The tensor reconstructed from DTensor (e.g., via .full_tensor().contiguous()).
sharded_dim: The dimension index in packed_parameter that was originally sharded.
world_size: The tensor parallel world size.
num_blocks: The number of projections that were packed together (e.g., 2 for gate_up_proj).
Returns:
The reordered tensor in canonical packed format.
|
github-repos
|
def __init__(self, app, db, UserClass, UserEmailClass=None, UserInvitationClass=None, RoleClass=None):
self.app = app
self.db = db
self.UserClass = UserClass
self.UserEmailClass = UserEmailClass
self.UserInvitationClass = UserInvitationClass
self.RoleClass = RoleClass
self.user_manager = app.user_manager
self.db_adapter = None
if self.db_adapter is None:
try:
from flask_sqlalchemy import SQLAlchemy
if isinstance(db, SQLAlchemy):
self.db_adapter = SQLDbAdapter(app, db)
except ImportError:
pass
if self.db_adapter is None:
try:
from flask_mongoengine import MongoEngine
if isinstance(db, MongoEngine):
self.db_adapter = MongoDbAdapter(app, db)
except ImportError:
pass
if self.db_adapter is None:
try:
from flask_flywheel import Flywheel
if isinstance(db, Flywheel):
self.db_adapter = DynamoDbAdapter(app, db)
except ImportError:
pass
if self.db_adapter is None:
try:
from pynamodb.models import Model
if issubclass(UserClass, Model):
self.db_adapter = PynamoDbAdapter(app)
except ImportError:
pass
if self.db_adapter is None:
raise ConfigError(
'No Flask-SQLAlchemy, Flask-MongoEngine or Flask-Flywheel installed and no Pynamo Model in use.'\
' You must install one of these Flask extensions.')
|
Initialize the appropriate DbAdapter, based on the ``db`` parameter type.
Args:
app(Flask): The Flask application instance.
db: The Object-Database Mapper instance.
UserClass: The User class.
UserEmailClass: Optional UserEmail class for multiple-emails-per-user feature.
UserInvitationClass: Optional UserInvitation class for user-invitation feature.
RoleClass: For testing purposes only.
|
juraj-google-style
|
def input(self):
return self._nested_inputs
|
Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
|
github-repos
|
def _GetMetadataUpdate(
self, metadata_key='', recursive=True, wait=True, timeout=None):
metadata_key = os.path.join(metadata_key, '') if recursive else metadata_key
metadata_url = os.path.join(METADATA_SERVER, metadata_key)
params = {
'alt': 'json',
'last_etag': self.etag,
'recursive': recursive,
'timeout_sec': timeout or self.timeout,
'wait_for_change': wait,
}
while True:
response = self._GetMetadataRequest(
metadata_url, params=params, timeout=timeout)
etag_updated = self._UpdateEtag(response)
if wait and not etag_updated and not timeout:
continue
else:
break
return json.loads(response.read().decode('utf-8'))
|
Request the contents of metadata server and deserialize the response.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
wait: bool, True if we should wait for a metadata change.
timeout: int, timeout in seconds for returning metadata output.
Returns:
json, the deserialized contents of the metadata server.
|
juraj-google-style
|
def run(self, *args, **kwargs):
self.log.debug('Starting EBSAuditor')
data = self.update_data()
notices = defaultdict(list)
for account, issues in data.items():
for issue in issues:
for recipient in account.contacts:
notices[NotificationContact(type=recipient['type'], value=recipient['value'])].append(issue)
self.notify(notices)
|
Main execution point for the auditor
Args:
*args:
**kwargs:
Returns:
`None`
|
juraj-google-style
|
def split_to_tiles(image: np.ndarray, num_tiles_height: int, num_tiles_width: int) -> np.ndarray:
num_channels, height, width = image.shape
tile_height = height // num_tiles_height
tile_width = width // num_tiles_width
image = image.reshape(num_channels, num_tiles_height, tile_height, num_tiles_width, tile_width)
image = image.transpose(1, 3, 0, 2, 4)
image = image.reshape(num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width)
return np.ascontiguousarray(image)
|
Split an image into a specified number of tiles along its width and height dimensions.
Args:
image (`np.ndarray`):
Input image with shape (num_channels, height, width).
num_tiles_height (`int`):
Number of tiles to split the image into along its height.
num_tiles_width (`int`):
Number of tiles to split the image into along its width.
Returns:
`np.ndarray`:
Array of image tiles with shape (num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width).
|
github-repos
|
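A quick shape check for `split_to_tiles` above with a made-up image, assuming the function is importable:

```python
import numpy as np

image = np.zeros((3, 448, 448))   # (num_channels, height, width)
tiles = split_to_tiles(image, num_tiles_height=2, num_tiles_width=2)
print(tiles.shape)                # (4, 3, 224, 224)
```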
def make_slot_check(wanted):
if isinstance(wanted, types.FunctionType):
return wanted
if isinstance(wanted, int):
(item, meta) = (wanted, None)
elif isinstance(wanted, Slot):
(item, meta) = (wanted.item_id, wanted.damage)
elif isinstance(wanted, (Item, Block)):
(item, meta) = (wanted.id, wanted.metadata)
elif isinstance(wanted, str):
item_or_block = get_item_or_block(wanted, init=True)
(item, meta) = (item_or_block.id, item_or_block.metadata)
else:
try:
(item, meta) = wanted
except TypeError:
raise ValueError(('Illegal args for make_slot_check(): %s' % wanted))
return (lambda slot: ((item == slot.item_id) and (meta in (None, slot.damage))))
|
Creates and returns a function that takes a slot
and checks if it matches the wanted item.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
|
codesearchnet
|
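An illustrative sketch of `make_slot_check` above; it assumes the spockbot module context where `Slot`, `Item` and `Block` are defined, and `FakeSlot` is a hypothetical stand-in carrying the two attributes the returned check reads:

```python
from collections import namedtuple

FakeSlot = namedtuple('FakeSlot', ['item_id', 'damage'])  # hypothetical stand-in

check_any_log = make_slot_check(17)        # item id only: any metadata matches
check_spruce = make_slot_check((17, 1))    # id and metadata must both match
print(check_any_log(FakeSlot(17, 3)))      # True
print(check_spruce(FakeSlot(17, 3)))       # False
```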
def clientConnectionFailed(self, err, address: Address):
if type(err.value) == error.TimeoutError:
logger.debug(f"Failed connecting to {address} connection timed out")
elif type(err.value) == error.ConnectError:
ce = err.value
if len(ce.args) > 0:
logger.debug(f"Failed connecting to {address} {ce.args[0].value}")
else:
logger.debug(f"Failed connecting to {address}")
else:
logger.debug(f"Failed connecting to {address} {err.value}")
self.peers_connecting -= 1
self.RemoveKnownAddress(address)
self.RemoveFromQueue(address)
self.AddDeadAddress(address)
return err.type
|
Called when we fail to connect to an endpoint
Args:
err: Twisted Failure instance
address: the address we failed to connect to
|
juraj-google-style
|
def cut_matrix(self, n):
return connectivity.relevant_connections(n, self.from_nodes,
self.to_nodes)
|
Compute the cut matrix for this cut.
The cut matrix is a square matrix which represents connections severed
by the cut.
Args:
n (int): The size of the network.
Example:
>>> cut = Cut((1,), (2,))
>>> cut.cut_matrix(3)
array([[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]])
|
juraj-google-style
|
def ScanForFileSystem(self, source_path_spec):
if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):
return path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_APFS, location='/', parent=source_path_spec)
try:
type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(source_path_spec, resolver_context=self._resolver_context)
except RuntimeError as exception:
raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception))
if (not type_indicators):
return None
type_indicator = type_indicators[0]
if (len(type_indicators) > 1):
if (definitions.PREFERRED_NTFS_BACK_END not in type_indicators):
raise errors.BackEndError('Unsupported source found more than one file system types.')
type_indicator = definitions.PREFERRED_NTFS_BACK_END
if (type_indicator == definitions.TYPE_INDICATOR_NTFS):
root_location = '\\'
else:
root_location = '/'
file_system_path_spec = path_spec_factory.Factory.NewPathSpec(type_indicator, location=root_location, parent=source_path_spec)
if (type_indicator == definitions.TYPE_INDICATOR_TSK):
try:
file_system = resolver.Resolver.OpenFileSystem(file_system_path_spec, resolver_context=self._resolver_context)
file_system.Close()
except errors.BackEndError:
file_system_path_spec = None
return file_system_path_spec
|
Scans the path specification for a supported file system format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: file system path specification or None if no supported file
system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one file
system type is found.
|
codesearchnet
|
def num_employers(self, num_employers):
if (num_employers < 2):
self._logger.log('warn', 'Two employers are needed: setting to two')
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(num_employers))
self._limit = (num_employers * len(self._value_ranges))
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
|
Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
|
codesearchnet
|
def un(byts):
return msgpack.loads(byts, use_list=False, raw=False, unicode_errors='surrogatepass')
|
Use msgpack to de-serialize a python object.
Args:
byts (bytes): The bytes to de-serialize
Notes:
String objects are decoded using utf8 encoding. In order to handle
potentially malformed input, ``unicode_errors='surrogatepass'`` is set
to allow decoding bad input strings.
Returns:
obj: The de-serialized object
|
codesearchnet
|
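A round-trip sketch for `un` above; the matching serializer is assumed to pack with `use_bin_type=True`:

```python
import msgpack

byts = msgpack.dumps({'name': 'visi', 'value': 10}, use_bin_type=True)
print(un(byts))  # {'name': 'visi', 'value': 10}
```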
def read_from_source(source, start_position=None, stop_position=None):
values = []
range_tracker = source.get_range_tracker(start_position, stop_position)
assert isinstance(range_tracker, iobase.RangeTracker)
reader = source.read(range_tracker)
for value in reader:
values.append(value)
return values
|
Reads elements from the given ```BoundedSource```.
Only reads elements within the given position range.
Args:
source (~apache_beam.io.iobase.BoundedSource):
:class:`~apache_beam.io.iobase.BoundedSource` implementation.
start_position (int): start position for reading.
stop_position (int): stop position for reading.
Returns:
List[str]: the set of values read from the sources.
|
github-repos
|
def get_model_filepath(self, infodict):
u = infodict['uniprot_ac']
original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'],
infodict['template'], infodict['coordinate_id'])
file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6],
'swissmodel', '{}.pdb'.format(original_filename))
if op.exists(file_path):
return file_path
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return None
|
Get the path to the homology model using information from the index dictionary for a single model.
Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.
Use one of those dictionaries as input to this function to get the filepath to the model itself.
Args:
infodict (dict): Information about a model from get_models
Returns:
str: Path to homology model
|
juraj-google-style
|
def raisefrom(exc_type, message, exc):
if (sys.version_info[:2] >= (3, 2)):
six.raise_from(exc_type(message), exc)
else:
six.reraise(exc_type, ('%s - %s' % (message, exc)), sys.exc_info()[2])
|
Call Python 3 raise from or emulate it for Python 2
Args:
exc_type (Any): Type of Exception
message (str): Error message to display
exc (BaseException): original exception
Returns:
None
|
codesearchnet
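A hypothetical call site, wrapping a low-level parsing failure in a higher-level error while keeping the original exception as the cause:
try:
    int('not-a-number')
except ValueError as exc:
    raisefrom(RuntimeError, 'Could not parse the retry count setting', exc)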
|
def find_tested_models(test_file: str) -> List[str]:
with open(os.path.join(PATH_TO_TESTS, test_file), 'r', encoding='utf-8', newline='\n') as f:
content = f.read()
all_models = re.findall('all_model_classes\\s+=\\s+\\(\\s*\\(([^\\)]*)\\)', content)
all_models += re.findall('all_model_classes\\s+=\\s+\\(([^\\)]*)\\)', content)
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(','):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
|
Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from
the common test class.
Args:
test_file (`str`): The path to the test file to check
Returns:
`List[str]`: The list of models tested in that file.
|
github-repos
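Applying the second regular expression on its own to a made-up test-file body (illustrative content, not taken from the transformers repository) shows how the tuple assigned to all_model_classes is captured:
import re

content = "all_model_classes = (BertModel, BertForMaskedLM) if is_torch_available() else ()"
match = re.findall('all_model_classes\\s+=\\s+\\(([^\\)]*)\\)', content)
# match == ['BertModel, BertForMaskedLM']; splitting on ',' and stripping yields the model names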
|
def add_from_existing(self, resource, timeout=-1):
uri = self.URI + "/from-existing"
return self._client.create(resource, uri=uri, timeout=timeout)
|
Adds a volume that already exists in the Storage system
Args:
resource (dict):
Object to create.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Added resource.
|
juraj-google-style
|
def isCaCert(self, name):
crtpath = self._getPathJoin('cas', '%s.crt' % name)
return os.path.isfile(crtpath)
|
Checks if a CA certificate exists.
Args:
name (str): The name of the CA keypair.
Examples:
Check if the CA certificate for "myca" exists:
exists = cdir.isCaCert('myca')
Returns:
bool: True if the certificate is present, False otherwise.
|
juraj-google-style
|
def query_snl(self, criteria):
try:
payload = {"criteria": json.dumps(criteria)}
response = self.session.post("{}/snl/query".format(self.preamble),
data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp["response"]
else:
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}"
.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
|
Query for submitted SNLs.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
criteria (dict): Query criteria.
Returns:
A dict, with a list of submitted SNLs in the "response" key.
Raises:
MPRestError
|
juraj-google-style
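A hedged usage sketch through pymatgen's MPRester, which wraps this REST call; the API key and criteria below are placeholders:
from pymatgen.ext.matproj import MPRester

with MPRester('YOUR_API_KEY') as m:
    submitted = m.query_snl({'about.remarks': 'MP user submission'})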
|
def __add__(self, r):
if not isinstance(r, TestResult):
raise TypeError('Operand %s of type %s is not a TestResult.' %
(r, type(r)))
sum_result = TestResult()
for name in sum_result.__dict__:
r_value = getattr(r, name)
l_value = getattr(self, name)
if isinstance(r_value, list):
setattr(sum_result, name, l_value + r_value)
return sum_result
|
Overrides '+' operator for TestResult class.
The add operator merges two TestResult objects by concatenating all of
their lists together.
Args:
r: another instance of TestResult to be added
Returns:
A TestResult instance that's the sum of two TestResult instances.
|
juraj-google-style
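A short sketch of the merge behaviour, assuming Mobly's records module; every list attribute of the two results is concatenated into the sum:
from mobly.records import TestResult

r1, r2 = TestResult(), TestResult()
combined = r1 + r2   # equivalent to r1.__add__(r2); record lists are concatenated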
|
def delay(self, identifier: typing.Any, until: typing.Union[(int, float)]=(- 1)) -> bool:
raise NotImplementedError()
|
Delay a deferred function until the given time.
Args:
identifier (typing.Any): The identifier returned from a call
to defer or defer_for.
until (typing.Union[int, float]): A numeric value that represents
the clock time when the callback becomes available for
execution. Values that are less than the current time result in
the function being called at the next opportunity.
Returns:
bool: True if the call is delayed. False if the identifier is
invalid or if the deferred call has already been executed.
|
codesearchnet
|
def fetch(self, customer_id, data={}, **kwargs):
return super(Customer, self).fetch(customer_id, data, **kwargs)
|
Fetch Customer for given Id
Args:
customer_id : Id of the customer to retrieve
Returns:
Customer dict for the given customer Id
|
juraj-google-style
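A hedged example with the Razorpay Python SDK; the key pair and customer id below are placeholders:
import razorpay

client = razorpay.Client(auth=('rzp_test_key', 'rzp_test_secret'))
customer = client.customer.fetch('cust_00000000000001')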
|
def new_log_files(self, name, redirect_output=True):
if (redirect_output is None):
redirect_output = self._ray_params.redirect_output
if (not redirect_output):
return (None, None)
log_stdout = self._make_inc_temp(suffix='.out', prefix=name, directory_name=self._logs_dir)
log_stderr = self._make_inc_temp(suffix='.err', prefix=name, directory_name=self._logs_dir)
log_stdout_file = open(log_stdout, 'a', buffering=1)
log_stderr_file = open(log_stderr, 'a', buffering=1)
return (log_stdout_file, log_stderr_file)
|
Generate partially randomized filenames for log files.
Args:
name (str): descriptive string for this log file.
redirect_output (bool): True if files should be generated for
logging stdout and stderr and false if stdout and stderr
should not be redirected.
If it is None, it will use the "redirect_output" Ray parameter.
Returns:
If redirect_output is true, this will return a tuple of two
file handles. The first is for redirecting stdout and the
second is for redirecting stderr.
If redirect_output is false, this will return a tuple
of two None objects.
|
codesearchnet
|
def _create_job_info(self, job_dir):
meta = self._build_job_meta(job_dir)
self.logger.debug("Create job: %s" % meta)
job_record = JobRecord.from_json(meta)
job_record.save()
|
Create information for given job.
Meta file will be loaded if exists, and the job information will
be saved in db backend.
Args:
job_dir (str): Directory path of the job.
|
juraj-google-style
|
def parse_rsa_data(rsa_outfile, ignore_hets=True):
naccess_rel_dict = OrderedDict()
with open(rsa_outfile, 'r') as f:
for line in f:
if line.startswith('RES'):
res_name = line[4:7]
chain_id = line[8]
resseq = int(line[9:13])
icode = line[13]
res_id = (' ', resseq, icode)
all_atoms_abs = line[16:22].strip()
all_atoms_rel = line[23:28].strip()
side_chain_abs = line[29:35].strip()
side_chain_rel = line[36:41].strip()
main_chain_abs = line[42:48].strip()
main_chain_rel = line[49:54].strip()
non_polar_abs = line[55:61].strip()
non_polar_rel = line[62:67].strip()
all_polar_abs = line[68:74].strip()
all_polar_rel = line[75:80].strip()
if all_atoms_rel == 'N/A' and main_chain_rel == 'N/A' and all_polar_rel == 'N/A' and non_polar_rel == 'N/A' and side_chain_rel == 'N/A' and ignore_hets:
continue
naccess_rel_dict[(chain_id, res_id)] = {
'res_name' : res_name,
'all_atoms_abs' : ssbio.utils.conv_to_float(all_atoms_abs, inf_str='N/A'),
'all_atoms_rel' : ssbio.utils.conv_to_float(all_atoms_rel, inf_str='N/A'),
'side_chain_abs': ssbio.utils.conv_to_float(side_chain_abs, inf_str='N/A'),
'side_chain_rel': ssbio.utils.conv_to_float(side_chain_rel, inf_str='N/A'),
'main_chain_abs': ssbio.utils.conv_to_float(main_chain_abs, inf_str='N/A'),
'main_chain_rel': ssbio.utils.conv_to_float(main_chain_rel, inf_str='N/A'),
'non_polar_abs' : ssbio.utils.conv_to_float(non_polar_abs, inf_str='N/A'),
'non_polar_rel' : ssbio.utils.conv_to_float(non_polar_rel, inf_str='N/A'),
'all_polar_abs' : ssbio.utils.conv_to_float(all_polar_abs, inf_str='N/A'),
'all_polar_rel' : ssbio.utils.conv_to_float(all_polar_rel, inf_str='N/A')}
return naccess_rel_dict
|
Process a NACCESS or freesasa RSA output file. Adapted from the Biopython NACCESS module.
Args:
rsa_outfile (str): Path to RSA output file
ignore_hets (bool): If HETATMs should be excluded from the final dictionary. This is extremely important
when loading this information into a ChainProp's SeqRecord, since this will throw off the sequence matching.
Returns:
dict: Per-residue dictionary of RSA values
|
juraj-google-style
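A small usage sketch (the path is a placeholder); keys are (chain id, residue id) tuples and values are the per-residue dictionaries built above:
rsa = parse_rsa_data('/tmp/1abc.rsa')   # placeholder path to a NACCESS/freesasa .rsa output
for (chain_id, res_id), values in rsa.items():
    print(chain_id, res_id, values['all_atoms_rel'])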
|
def _create_flow(self, request_handler):
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path)
self.flow = client.OAuth2WebServerFlow(
self._client_id, self._client_secret, self._scope,
redirect_uri=redirect_uri, user_agent=self._user_agent,
auth_uri=self._auth_uri, token_uri=self._token_uri,
revoke_uri=self._revoke_uri, **self._kwargs)
|
Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
|
juraj-google-style
|
def CheckDataVisiblity(self, value):
if (not self.data_visibility_policy):
return None
(visible, reason) = self.data_visibility_policy.IsDataVisible(DetermineType(value))
if visible:
return None
return {'status': {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': reason}}}
|
Returns a status object if the given value is not visible.
Args:
value: The value to check. The actual value here is not important but the
value's metadata (e.g. package and type) will be checked.
Returns:
None if the value is visible. A variable structure with an error status
if the value should not be visible.
|
codesearchnet
|
def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True):
if parsed_token.get('iss') not in issuers:
_logger.warning('Issuer was not valid: %s', parsed_token.get('iss'))
return False
aud = parsed_token.get('aud')
if not aud:
_logger.warning('No aud field in token')
return False
cid = parsed_token.get('azp')
audience_allowed = (aud in audiences) or (is_legacy_google_auth and aud == cid)
if not audience_allowed:
_logger.warning('Audience not allowed: %s', aud)
return False
if is_legacy_google_auth:
if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK:
_logger.warning('Client ID check can\'t be skipped for ID tokens. '
'Id_token cannot be verified.')
return False
elif not cid or cid not in allowed_client_ids:
_logger.warning('Client ID is not allowed: %s', cid)
return False
if 'email' not in parsed_token:
return False
return True
|
Verify a parsed user ID token.
Args:
parsed_token: The parsed token information.
issuers: A list of allowed issuers
audiences: The allowed audiences.
allowed_client_ids: The allowed client IDs.
is_legacy_google_auth: Whether the token came from legacy Google auth, in which case the audience may equal the authorized party (azp) and the client ID check applies.
Returns:
True if the token is verified, False otherwise.
|
juraj-google-style
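An illustrative call with a hand-built payload (every identifier is a placeholder); it passes the issuer, audience, client id and email checks above:
parsed = {
    'iss': 'https://accounts.google.com',
    'aud': 'my-client-id.apps.googleusercontent.com',
    'azp': 'my-client-id.apps.googleusercontent.com',
    'email': 'user@example.com',
}
ok = _verify_parsed_token(
    parsed,
    issuers=['https://accounts.google.com'],
    audiences=['my-client-id.apps.googleusercontent.com'],
    allowed_client_ids=['my-client-id.apps.googleusercontent.com'])
# ok is True; changing any of the issuer, audience or client id makes it False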
|
def FinalizeTaskStorage(self, task):
if task.identifier not in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} does not exist.'.format(
task.identifier))
|
Finalizes a processed task storage.
Args:
task (Task): task.
Raises:
IOError: if the task storage does not exist.
OSError: if the task storage does not exist.
|
juraj-google-style
|
def delete_folder(self, folder):
if not is_valid_uuid(folder):
raise StorageArgumentException(
'Invalid UUID for folder: {0}'.format(folder))
self._authenticated_request \
.to_endpoint('folder/{}/'.format(folder)) \
.delete()
|
Delete a folder. It will recursively delete all the content.
Args:
folder (str): The UUID of the folder to be deleted.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: 403
StorageNotFoundException: 404
HTTPError: other non-20x error codes
|
juraj-google-style
|
def __init__(self, subdir, experiment_name, run_name):
self._subdir = subdir
self._experiment_name = experiment_name
self._run_name = run_name
self._directory_watcher = directory_watcher.DirectoryWatcher(
subdir,
event_file_loader.RawEventFileLoader,
io_wrapper.IsTensorFlowEventsFile)
|
Constructs a `_RunLoader`.
Args:
subdir: string, filesystem path of the run directory
experiment_name: string, name of the run's experiment
run_name: string, name of the run
|
juraj-google-style
|
def new_typed_dict(self, name, items, keywords):
cls_name = escape.pack_typeddict_base_class(name, len(self.generated_classes[name]))
processed_keywords = []
for k in keywords:
if k.arg != 'total':
raise _ParseError(f'Unexpected kwarg {k.arg!r} passed to TypedDict')
if not isinstance(k.value, types.Pyval) or not isinstance(k.value.value, bool):
raise _ParseError(f"Illegal value {k.value!r} for 'total' kwarg to TypedDict")
processed_keywords.append((k.arg, k.value.to_pytd_literal()))
constants = tuple((pytd.Constant(k, v) for k, v in items.items()))
cls = pytd.Class(name=cls_name, keywords=tuple(processed_keywords), bases=(pytd.NamedType('typing.TypedDict'),), methods=(), constants=constants, decorators=(), classes=(), slots=None, template=())
self.generated_classes[name].append(cls)
self.add_import('typing', ['TypedDict'])
return pytd.NamedType(cls_name)
|
Returns a type for a TypedDict.
This method is called only for TypedDict objects defined via the following
function-based syntax:
Foo = TypedDict('Foo', {'a': int, 'b': str}, total=False)
rather than the recommended class-based syntax.
Args:
name: the name of the TypedDict instance, e.g., "'Foo'".
items: a {key: value_type} dict, e.g., {"'a'": "int", "'b'": "str"}.
keywords: A sequence of kwargs passed to the function.
|
github-repos
|
def to_value(original_string, corenlp_value=None):
if isinstance(original_string, Value):
return original_string
if (not corenlp_value):
corenlp_value = original_string
amount = NumberValue.parse(corenlp_value)
if (amount is not None):
return NumberValue(amount, original_string)
ymd = DateValue.parse(corenlp_value)
if (ymd is not None):
if (ymd[1] == ymd[2] == (- 1)):
return NumberValue(ymd[0], original_string)
else:
return DateValue(ymd[0], ymd[1], ymd[2], original_string)
return StringValue(original_string)
|
Convert the string to Value object.
Args:
original_string (basestring): Original string
corenlp_value (basestring): Optional value returned from CoreNLP
Returns:
Value
|
codesearchnet
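Behaviour follows the branches above: numbers first, then dates, with strings as the fallback (exact normalisation depends on NumberValue.parse and DateValue.parse):
print(type(to_value('42')).__name__)            # NumberValue
print(type(to_value('hello world')).__name__)   # StringValue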
|
def add_affiliation(self, value, curated_relation=None, record=None):
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation)
|
Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference
|
juraj-google-style
|
def remove(self, email):
if (email in self._collaborators):
if (self._collaborators[email] == ShareRequestValue.Add):
del self._collaborators[email]
else:
self._collaborators[email] = ShareRequestValue.Remove
self._dirty = True
|
Remove a Collaborator.
Args:
email (str): Collaborator email address.
|
codesearchnet
|
def get_component(self, colour, tolerance=0, default=None):
if (not (0 <= tolerance <= np.sqrt(195075))):
raise LegendError('Tolerance must be between 0 and 441.67')
for decor in self.__list:
if (colour.lower() == decor.colour):
return decor.component
(r1, g1, b1) = utils.hex_to_rgb(colour)
best_match = None  # sentinel; replaced below by the nearest matching decor
best_match_dist = np.sqrt((((r1 ** 2.0) + (g1 ** 2.0)) + (b1 ** 2.0)))
for decor in self.__list:
(r2, g2, b2) = decor.rgb
distance = np.sqrt(((((r2 - r1) ** 2.0) + ((g2 - g1) ** 2.0)) + ((b2 - b1) ** 2.0)))
if (distance < best_match_dist):
best_match = decor.component
best_match_dist = distance
best_match_colour = decor.colour
if (best_match_dist <= tolerance):
return best_match
else:
with warnings.catch_warnings():
warnings.simplefilter('always')
w = 'No match found for {0} '.format(colour.lower())
w += 'with tolerance of {0}. Best match is '.format(tolerance)
w += '{0}, {1}'.format(best_match.summary(), best_match_colour)
w += ', d={0}'.format(best_match_dist)
warnings.warn(w)
return default
|
Get the component corresponding to a display colour. This is for
generating a Striplog object from a colour image of a striplog.
Args:
colour (str): The hex colour string to look up.
tolerance (float): The colourspace distance within which to match.
default (component or None): The component to return in the event
of no match.
Returns:
component. The component best matching the provided colour.
|
codesearchnet
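A hedged usage sketch assuming striplog's built-in NSDOE legend is available; an exact hex match returns immediately, otherwise the nearest decor within the tolerance wins:
from striplog import Legend

legend = Legend.builtin('NSDOE')                          # assumed built-in legend name
component = legend.get_component('#f7e9a6', tolerance=30)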
|
def _send(self, method, path, data, filename):
if filename is None:
return self._send_json(method, path, data)
else:
return self._send_file(method, path, data, filename)
|
Send data to a remote server, either with a POST or a PUT request.
Args:
`method`: The method (POST or PUT) to use.
`path`: The path to the resource.
`data`: The data to send.
`filename`: The filename of the file to send (if any).
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
|
juraj-google-style
|