def autoaugment_preproccess(
input_size,
scale_size,
normalize=None,
pre_transform=True,
**kwargs):
"""
Args:
input_size:
scale_size:
normalize:
pre_transform:
**kwargs:
Returns:
"""
if normalize is None:
normalize = __imagenet_stats
augment = PbaAugment(
input_size,
scale_size,
normalize=normalize,
pre_transform=pre_transform,
**kwargs)
return augment
| 5,348,400 |
def pubchem_image(cid_or_container, size=500):
"""
Generate HTML code for a PubChem molecular structure graphic and link.
Parameters:
cid_or_container: The CID (int, str) or a subscriptable object that
contains a key ``cid``.
Returns:
HTML code for an image from PubChem.
"""
if type(cid_or_container) in (int, str):
cid = cid_or_container
elif 'cid' in cid_or_container:
cid = cid_or_container['cid']
else:
raise MissingParamError('cid')
cid_url = 'https://pubchem.ncbi.nlm.nih.gov/compound/{}'.format(cid)
imgbase = 'https://pubchem.ncbi.nlm.nih.gov/image/imagefly.cgi?'
params = {'cid': cid, 'width': size, 'height': size}
img_url = imgbase + urlencode(params)
ret = '<a target="_blank" href="{0}"><img src="{1}"></a>'
ret = ret.format(cid_url, img_url)
return ret
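
# A brief usage sketch (assumes this module's imports, e.g. urlencode and
# MissingParamError, are in scope); the CID value here is arbitrary.
print(pubchem_image({'cid': 2244}, size=300))
# -> '<a target="_blank" href="https://pubchem.ncbi.nlm.nih.gov/compound/2244">
#     <img src="https://pubchem.ncbi.nlm.nih.gov/image/imagefly.cgi?cid=2244&width=300&height=300"></a>'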
| 5,348,401 |
def break_discussion():
"""Emulate buggy 'ticket move' behavior"""
project = M.Project.query.get(shortname='test')
tracker = M.AppConfig.query.find({'options.mount_point': 'bugs'}).first()
discussion = M.Discussion(name='fake discussion')
app_config = M.AppConfig()
app_config.tool_name = 'Tickets'
app_config.project_id = project._id
app_config.options = {'mount_point': 'fake'}
session(app_config).flush(app_config)
discussion.app_config_id = app_config._id
session(discussion).flush(discussion)
t = TM.Ticket.new()
t.summary = 'ticket 1'
    # move discussion somewhere
t.discussion_thread.discussion.app_config_id = discussion.app_config_id
session(t).flush(t)
t = TM.Ticket.new()
t.summary = 'ticket 2'
# moved ticket attached to wrong discussion
t.discussion_thread.discussion_id = discussion._id
t.discussion_thread.add_post(text='comment 1')
t.discussion_thread.add_post(text='comment 2')
session(t).flush(t)
| 5,348,402 |
def to_lines(text: str, k: int) -> Optional[List[str]]:
"""
Given a block of text and a maximum line length k, split the text into lines of length at most k.
If this cannot be done, i.e. a word is longer than k, return None.
:param text: the block of text to process
:param k: the maximum length of each line
:return: the list of lines
>>> text = 'the quick brown fox jumps over the lazy dog'
>>> to_lines(text, 4) is None
True
>>> to_lines(text, 5)
['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
>>> to_lines(text, 9)
['the quick', 'brown fox', 'jumps', 'over the', 'lazy dog']
>>> to_lines(text, 10)
['the quick', 'brown fox', 'jumps over', 'the lazy', 'dog']
>>> to_lines(text, 12)
['the quick', 'brown fox', 'jumps over', 'the lazy dog']
>>> to_lines('AAAAA', 5)
['AAAAA']
"""
def line_to_str(l: List[str]) -> str:
return ' '.join(l)
# If there is no text or the line length is 0, we can't do anything.
if not text or not k:
return None
    # If any word is longer than k, we can't do anything.
words = text.split()
if max(len(word) for word in words) > k:
return None
    # Now split the words into lines.
lines = []
line = []
len_so_far = 0
for word in words:
len_word = len(word)
if len_word + len_so_far <= k:
# We add the word to the line plus a blank space afterwards.
# If this is the last word in the line, the blank space will not occur; hence why we check the
# condition <= k rather than < k.
line.append(word)
len_so_far += len_word + 1
else:
# Make the line into a string, add it to lines, and reset everything.
lines.append(line_to_str(line))
line = [word]
len_so_far = len_word + 1
# Last case: if we have a partial line, add it.
if line:
lines.append(line_to_str(line))
# Assert that none of the lines went over the length.
for line in lines:
assert(len(line) <= k)
return lines
| 5,348,403 |
def _image_pos(name):
"""
    Find the position of the given image within the background image.
"""
imsrc = ac.imread('images/bg/{}.png'.format(name[1:]))
imobj = ac.imread('images/{}.PNG'.format(name))
# find the match position
pos = ac.find_template(imsrc, imobj)
circle_center_pos = pos['result']
return circle_center_pos
| 5,348,404 |
def handle_greenness_indices(parameters: tuple, input_folder: str, working_folder: str, msg_func: Callable, err_func: Callable) -> \
Optional[dict]:
"""Handle running the greenness algorithm
Arguments:
parameters: the specified parameters for the algorithm
input_folder: the base folder where input files are located
working_folder: the working folder for the algorithm
msg_func: function to write messages to
err_func: function to write errors to
Return:
        A dictionary of additional parameters to pass to the next command or None
"""
json_filename, experiment_file, search_folder, options = _find_parameter_values(parameters,
('found_json_file', 'experimentdata', 'results_search_folder', 'options'))
# Ensure we have our mandatory parameters
_handle_missing_parameters('greenness indices', (json_filename,), ('found_json_file',))
_handle_missing_files('greenness indices', (json_filename,), ('found_json_file',))
# Adjust the found files JSON to point to our output folder - making a best effort if search_folder is None
new_json_filename = _repoint_files_json_dir(json_filename, search_folder, working_folder, working_folder)
if new_json_filename is None:
new_json_filename = json_filename
# Default our options
if options is None:
options = ''
# Add in additional options
if experiment_file is not None:
if os.path.isfile(experiment_file):
options += ' --metadata ' + experiment_file
else:
msg = 'Warning: invalid experiment file specified for greenness indices "%s"' % experiment_file
logging.warning(msg)
msg_func((msg,), True)
# Write the arguments
json_args = {
'GREENNESS_INDICES_OPTIONS': options if options is not None else '',
}
json_file_path = os.path.join(working_folder, 'args.json')
_write_command_json(json_file_path, json_args)
logging.debug("Command JSON: %s", str(json_args))
# Run the command
ret_value = _run_command('greenness-indices', input_folder, working_folder, json_file_path, msg_func, err_func,
[[new_json_filename,'/scif/apps/src/greenness-indices_files.json']])
command_results = None
if ret_value == 0:
command_results = {'results': _get_results_json(input_folder, err_func, True)}
command_results['top_path'] = working_folder
return command_results
| 5,348,405 |
def weights_init_normal(m, std=0.02):
""" This init is common in GAN """
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, std)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, std)
m.bias.data.fill_(0.0)
| 5,348,406 |
def overwrite_table_data(
cube_path: os.PathLike, data: bytes, label=None, table_name=None
):
"""The file at *cube_path* will be modified by overwriting the
data in the specfied table name with the contents of *data*.
Either *label* or *table_name* is needed. If neither a *label*
dict (which must contain a 'Name' key) nor a *table_name* is
provided, then this function will raise a ValueError. If both
are provided, *label* will take precedence and *table_name*
will be ignored.
*label* is a dict which must contain *Name*, *StartByte*, and
    *Bytes* keys (*StartByte* and *Bytes* must be convertible to
int if not already). These values will be used to locate where
in the file to write the new *data*.
The name of the table as a string can be provided via *table_name*
and the ISIS getkey function will be applied to extract the needed
StartByte and Bytes values from the label. However, if there is more
than one table in the cube, getkey can only find the first, and a
    ValueError might be raised, even though there is a table of that
    name in the file.
If the pvl library is available, this function will use it to find
*all* of the tables in the *cube_path* labels and will find the one
named by *table_name* if it is present.
"""
(start, size) = get_startsize_from(label, table_name, cube_path)
if size != len(data):
raise ValueError(
f"The size of the table ({size}) to be overwritten from the file "
f"({cube_path}) is different from size of the data provided "
f"({len(data)})."
)
with open(cube_path, "r+b") as cubehandle:
cubehandle.seek(start)
cubehandle.write(data)
return
| 5,348,407 |
def notify_osd_fallback(title, message, sound, fallback):
"""Ubuntu Notify OSD notifications fallback (just sound)."""
# Fallback to wxPython notification
fallback(title, message, sound)
| 5,348,408 |
def product_except_self(nums: list[int]) -> list[int]:
"""Computes the product of all the elements of given array at each index excluding the value at that index.
Note: could also take math.prod(nums) and divide out the num at each index,
but corner cases of num_zeros > 1 and num_zeros == 1 make code inelegant.
    Args:
        nums: list of integers.
    Returns:
        A list where entry i is the product of all elements of nums except nums[i].
Examples:
>>> product_except_self([])
[]
>>> product_except_self([1,2,3,4])
[24, 12, 8, 6]
>>> product_except_self([-1,1,0,-3,3])
[0, 0, 9, 0, 0]
"""
"""ALGORITHM"""
## INITIALIZE VARS ##
nums_sz = len(nums)
# DS's/res
nums_products_except_i = [1] * nums_sz
## Multiply against product of all elements PRECEDING i
total_product = 1
for i in range(nums_sz):
nums_products_except_i[i] *= total_product
total_product *= nums[i]
## Multiply against product of all elements FOLLOWING i
total_product = 1
for i in reversed(range(nums_sz)):
nums_products_except_i[i] *= total_product
total_product *= nums[i]
return nums_products_except_i
| 5,348,409 |
def _season_retrieve_rows(
db: sql.Connection,
columns: str,
classification: str | int,
start_date: date | None,
stop_date: date | None,
exclusion_certifications: Collection[str] = (),
) -> Iterator[tuple]:
"""Gathers projects belonging in a single season in release order
The `columns` parameter should NOT be constructed from user input,
as this could open vulnerability to an SQL injection attack. Use {0}
    in place of the table name for automatic substitution (e.g.
"{0}.artist_group, {0}.classification").
    If a date is `None`, `date.min` (for the start) or `date.max` (for the
    stop) will be used to bound the season.
"""
if isinstance(classification, int):
classifications = AUTOSEASON_RANKINGS
target_table = "ranking"
else:
classifications = strray2list(classification)
if all(c in RANKINGS for c in classifications):
target_table = "ranking"
else:
target_table = "certification"
start_date, stop_date = start_date or date.min, stop_date or date.max
cursor = db.execute(
f"""
SELECT DISTINCT {columns.strip(';').format(target_table)}
FROM {target_table} LEFT JOIN certification AS exclusion
ON {target_table}.release_day = exclusion.release_day
AND {target_table}.artist_names = exclusion.artist_names
AND {target_table}.name = exclusion.name
AND exclusion.classification
IN {sql_array(exclusion_certifications)}
LEFT JOIN helper_artist_score
ON {target_table}.artist_group=helper_artist_score.artist_group
AND {target_table}.release_day=helper_artist_score.date_from
LEFT JOIN helper_single
ON helper_single.single_release_day = {target_table}.release_day
AND helper_single.artist_names = {target_table}.artist_names
AND helper_single.single_name = {target_table}.name
AND helper_single.album_track_names IN (
SELECT {target_table}.track_names FROM {target_table}
WHERE classification IN {sql_array(classifications)}
)
WHERE helper_single.album_track_names IS NULL
AND {target_table}.classification IN {sql_array(classifications)}
AND {target_table}.release_day >= ?
AND {target_table}.release_day < ?
AND exclusion.classification IS NULL
ORDER BY {target_table}.release_day ASC, helper_artist_score.score DESC
""",
(
*exclusion_certifications,
*classifications,
*classifications,
start_date,
stop_date,
),
)
yield from read_rows(cursor, columns)
| 5,348,410 |
def fetch_pkey():
"""Download private key file from secure S3 bucket"""
s3_client = boto3.client('s3')
s3_client.download_file(S3_BUCKET, BUCKET_KEY, PKEY_FILE)
pkey_filename = PKEY_FILE.replace("/tmp/", "")
    if os.path.isfile(PKEY_FILE):
        print(f"{pkey_filename} successfully downloaded from {S3_BUCKET}")
| 5,348,411 |
def so_mörk():
"""Sagnorð."""
return itertools.chain(
# Nafnháttur - nútíð er sleppt og ekki til í þáttíð miðmynd
{"sng---", "sng--þ", "snm---"},
# Boðháttur - alltaf 2.p og nútíð
string_product({"sb"}, MYND, {"2"}, TALA, {"n"}),
# Lýsingarháttur nútíðar
string_product({"slg---", "slm---"}),
# Framsögu- og viðtengingarháttur
string_product({"s"}, {"f", "v"}, MYND, PERSÓNA, TALA, TÍÐ),
# Lýsingarháttur þátíðar - hann virðist vera til í nefnifalli, þolfalli og þágufalli. Setjum líka inn eignarfall til að vera viss.
string_product({"s"}, {"þ"}, MYND, KYN, TALA, FALL),
)
| 5,348,412 |
def _histogram(values, value_range, nbins=100, dtype=tf.int32, name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram').
Returns:
A 1-D `Tensor` holding histogram of values.
"""
with tf.name_scope(name, 'histogram', [values, value_range, nbins]) as scope:
values = tf.convert_to_tensor(values, name='values')
values = tf.reshape(values, [-1])
value_range = tf.convert_to_tensor(value_range, name='value_range')
nbins_float = np.float32(nbins)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = tf.truediv(
values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = tf.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = tf.cast(
tf.clip_by_value(indices, 0, nbins_float - 1), tf.int32)
return tf.unsorted_segment_sum(
tf.ones_like(indices, dtype=dtype), indices, nbins, name=scope)
| 5,348,413 |
def find_missing_letter(chars):
"""
chars: string of characters
return: missing letter between chars or after
"""
letters = [char for char in chars][0]
chars = [char.lower() for char in chars]
alphabet = [char for char in "abcdefghijklmnopqrstuvwxyz"]
starting_index = alphabet.index(chars[0])
for letter in alphabet[starting_index:]:
if letter not in chars and chars[0].lower() == letters[0]:
return letter
if letter not in chars and chars[0].upper() == letters[0]:
return letter.upper()
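
# A small illustrative check (assumes the function above is in scope); the
# expected outputs follow from scanning the alphabet for the first gap.
print(find_missing_letter("abcdf"))  # -> 'e'
print(find_missing_letter("OQRS"))   # -> 'P'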
| 5,348,414 |
def series_to_pyseries(
name: str,
values: "pl.Series",
) -> "PySeries":
"""
Construct a PySeries from a Polars Series.
"""
values.rename(name, in_place=True)
return values.inner()
| 5,348,415 |
def _stagefile(coption, source, destination, filesize, is_stagein, setup=None, **kwargs):
"""
Stage the file (stagein or stageout)
    :return: destination file details (filesize, checksum, checksum_type) in case of success; raises an exception in case of failure
:raise: PilotException in case of controlled error
"""
filesize_cmd, checksum_cmd, checksum_type = None, None, None
cmd = '%s -np -f %s %s %s' % (copy_command, coption, source, destination)
if setup:
cmd = "source %s; %s" % (setup, cmd)
#timeout = get_timeout(filesize)
#logger.info("Executing command: %s, timeout=%s" % (cmd, timeout))
rcode, stdout, stderr = execute(cmd, **kwargs)
logger.info('rcode=%d, stdout=%s, stderr=%s', rcode, stdout, stderr)
if rcode: ## error occurred
error = resolve_common_transfer_errors(stdout + stderr, is_stagein=is_stagein)
#rcode = error.get('rcode') ## TO BE IMPLEMENTED
#if not is_stagein and rcode == PilotErrors.ERR_CHKSUMNOTSUP: ## stage-out, on fly checksum verification is not supported .. ignore
# logger.info('stage-out: ignore ERR_CHKSUMNOTSUP error .. will explicitly verify uploaded file')
# return None, None
raise PilotException(error.get('error'), code=error.get('rcode'), state=error.get('state'))
# extract filesize and checksum values from output
if coption != "":
filesize_cmd, checksum_cmd, checksum_type = get_file_info_from_output(stdout + stderr)
## verify transfer by returned checksum or call remote checksum calculation
## to be moved at the base level
is_verified = True ## TO BE IMPLEMENTED LATER
if not is_verified:
rcode = ErrorCodes.GETADMISMATCH if is_stagein else ErrorCodes.PUTADMISMATCH
raise PilotException("Copy command failed", code=rcode, state='AD_MISMATCH')
return filesize_cmd, checksum_cmd, checksum_type
| 5,348,416 |
def tally_cache_file(results_dir):
"""Return a fake tally cache file for testing."""
file = results_dir / 'tally.npz'
file.touch()
return file
| 5,348,417 |
def get_soft_label(cls_label, num_classes):
"""
compute soft label replace one-hot label
:param cls_label:ground truth class label
:param num_classes:mount of classes
:return:
"""
# def metrix_fun(a, b):
# torch.IntTensor(a)
# torch.IntTensor(b)
# metrix_dis = (a - b) ** 2
# return metrix_dis
def metrix_fun(a, b):
a = a.type_as(torch.FloatTensor())
b = b.type_as(torch.FloatTensor())
metrix_dis = (torch.log(a) - torch.log(b)) ** 2
return metrix_dis
def exp(x):
x = x.type_as(torch.FloatTensor())
return torch.exp(x)
rt = torch.IntTensor([cls_label]) # must be torch.IntTensor or torch.LongTensor
rk = torch.IntTensor([idx for idx in range(1, num_classes + 1, 1)])
metrix_vector = exp(-metrix_fun(rt, rk))
return metrix_vector / torch.sum(metrix_vector)
| 5,348,418 |
def get_option(args, config, key, default=None):
"""Gets key option from args if it is provided, otherwise tries to get it from config"""
if hasattr(args, key) and getattr(args, key) is not None:
return getattr(args, key)
return config.get(key, default)
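
# A minimal sketch of the precedence get_option implements: values set on the
# parsed args win over the config mapping, which wins over the default. The
# Namespace and dict below are illustrative only.
import argparse
args = argparse.Namespace(batch_size=32, lr=None)
config = {"lr": 0.001, "epochs": 10}
print(get_option(args, config, "batch_size"))    # 32  (from args)
print(get_option(args, config, "lr"))            # 0.001 (args value is None, falls back to config)
print(get_option(args, config, "epochs", 5))     # 10  (key not present on args)
print(get_option(args, config, "seed", 42))      # 42  (default)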
| 5,348,419 |
def z_decode(p):
"""
    decode a PHP-serialized parameter from bytes to a Python value
p: bytes
"""
if p[0]==0x4e: #NULL 0x4e-'N'
return None,p[2:]
elif p[0]==0x62: #bool 0x62-'b'
if p[2] == 0x30: # 0x30-'0'
return False,p[4:]
else:
return True,p[4:]
elif p[0]==0x69: #int 0x69-'i'
i = index(p, 0x3b, 1) # 0x3b-';'
return int(p[2:i]),p[i+1:]
elif p[0]==0x64: #double 0x64-'d'
i = index(p, 0x3b, 1) # 0x3b-';'
return float(p[2:i]),p[i+1:]
elif p[0]==0x73: #string 0x73-'s'
len_end = index(p, 0x3a, 2) # 0x3a-':'
str_len = int(p[2:len_end])
end = len_end + 1 + str_len + 2
v = p[(len_end + 2) : (len_end + 2 + str_len)]
return str(v, php_python.CHARSET), p[end+1:]
elif p[0]==0x61: #array 0x61-'a'
        list_=[]    # list result
        dict_={}    # dict result
        flag=True   # result type flag: True - list, False - dict
        second = index(p, 0x3a, 2) # 0x3a-":"
        num = int(p[2:second])  # number of elements
        pp = p[second+2:]       # all elements
        for i in range(num):
            key,pp=z_decode(pp)  # decode key
            if (i == 0):  # check whether the first element's key is int 0
                if (not isinstance(key, int)) or (key != 0):
                    flag = False
            val,pp=z_decode(pp)  # decode value
list_.append(val)
dict_[key]=val
return (list_, pp[2:]) if flag else (dict_, pp[2:])
else:
return p,''
| 5,348,420 |
def parse_extension(uri):
""" Parse the extension of URI. """
patt = re.compile(r'(\.\w+)')
return re.findall(patt, uri)[-1]
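
# Quick illustration (re is assumed to be imported, as the function requires).
# Only the last dotted component is returned; a URI with no extension raises IndexError.
print(parse_extension("https://example.com/data/archive.tar.gz"))  # -> '.gz'
print(parse_extension("photo.jpeg"))                               # -> '.jpeg'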
| 5,348,421 |
def rotz(ang):
"""
Calculate the transform for rotation around the Z-axis.
Arguments:
angle: Rotation angle in degrees.
Returns:
A 4x4 numpy array of float32 representing a homogeneous coordinates
matrix for rotation around the Z axis.
"""
rad = math.radians(ang)
c = math.cos(rad)
s = math.sin(rad)
return [
[c, -s, 0.0, 0.0],
[s, c, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
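
# A quick sanity check, using NumPy only for the matrix-vector product (rotz
# itself does not need it): rotating (1, 0, 0) by 90 degrees about Z gives ~(0, 1, 0).
import numpy as np
M = np.array(rotz(90.0), dtype=np.float32)
print(M @ np.array([1.0, 0.0, 0.0, 1.0], dtype=np.float32))  # ~[0. 1. 0. 1.]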
| 5,348,422 |
def getBits(data, offset, bits=1):
"""
Get specified bits from integer
>>> bin(getBits(0b0011100,2))
'0b1'
>>> bin(getBits(0b0011100,0,4))
'0b1100'
"""
mask = ((1 << bits) - 1) << offset
return (data & mask) >> offset
| 5,348,423 |
def rescale(img, input_height, input_width):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
aspect = img.shape[1]/float(img.shape[0])
if(aspect>1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = skimage.transform.resize(img, (input_width, res))
if(aspect<1):
# portrait orientation - tall image
res = int(input_width/aspect)
imgScaled = skimage.transform.resize(img, (res, input_height))
if(aspect == 1):
imgScaled = skimage.transform.resize(img, (input_width, input_height))
return imgScaled
| 5,348,424 |
def compute_halfmax_crossings(sig):
"""
Compute threshold_crossing, linearly interpolated.
Note this code assumes there is just one peak in the signal.
"""
half_max = np.max(sig)/2.0
fwhm_set = np.where(sig > half_max)
l_ndx = np.min(fwhm_set) #assumes a clean peak.
if l_ndx > 0:
fwhm_left_ndx = l_ndx - 1 + ((half_max - sig[l_ndx-1]) / (float(sig[l_ndx]) - sig[l_ndx-1]))
else:
fwhm_left_ndx = 0
r_ndx = np.max(fwhm_set) #assumes a clean peak.
if r_ndx < len(sig)-1:
fwhm_right_ndx = r_ndx + ((half_max - sig[r_ndx]) / (float(sig[r_ndx+1]) - sig[r_ndx]))
else:
fwhm_right_ndx = len(sig)-1
return np.array([fwhm_left_ndx,fwhm_right_ndx])
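
# Illustrative use on a simple triangular peak (numpy is assumed to be imported
# as np, matching the function body): for this symmetric peak the half-max
# crossings land midway up each side.
sig = np.array([0.0, 1.0, 2.0, 3.0, 2.0, 1.0, 0.0])
print(compute_halfmax_crossings(sig))  # -> [1.5 4.5]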
| 5,348,425 |
def test_export_edited_suffix():
"""test export with --edited-suffix"""
import glob
import os
import os.path
import osxphotos
from osxphotos.cli import export
runner = CliRunner()
cwd = os.getcwd()
# pylint: disable=not-context-manager
with runner.isolated_filesystem():
result = runner.invoke(
export,
[
os.path.join(cwd, CLI_PHOTOS_DB),
".",
"--edited-suffix",
CLI_EXPORT_EDITED_SUFFIX,
"-V",
],
)
assert result.exit_code == 0
files = glob.glob("*")
assert sorted(files) == sorted(CLI_EXPORT_FILENAMES_EDITED_SUFFIX)
| 5,348,426 |
def quote_identities(expression):
"""
Rewrite sqlglot AST to ensure all identities are quoted.
Example:
>>> import sqlglot
>>> expression = sqlglot.parse_one("SELECT x.a AS a FROM db.x")
>>> quote_identities(expression).sql()
'SELECT "x"."a" AS "a" FROM "db"."x"'
Args:
expression (sqlglot.Expression): expression to quote
Returns:
sqlglot.Expression: quoted expression
"""
def qualify(node):
if isinstance(node, exp.Identifier):
node.set("quoted", True)
return node
return expression.transform(qualify)
| 5,348,427 |
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> 'data:image/png;base64,'+png_str.encode('base64')
Inspired from
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if np is None:
raise ImportError('The NumPy package is required'
' for this functionality')
if colormap is None:
def colormap(x):
return (x, x, x, 1)
array = np.atleast_3d(data)
height, width, nblayers = array.shape
if nblayers not in [1, 3, 4]:
raise ValueError('Data must be NxM (mono), '
'NxMx3 (RGB), or NxMx4 (RGBA)')
assert array.shape == (height, width, nblayers)
if nblayers == 1:
array = np.array(list(map(colormap, array.ravel())))
nblayers = array.shape[1]
if nblayers not in [3, 4]:
            raise ValueError('colormap must provide colors of '
                             'length 3 (RGB) or 4 (RGBA)')
array = array.reshape((height, width, nblayers))
assert array.shape == (height, width, nblayers)
if nblayers == 3:
array = np.concatenate((array, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert array.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if array.dtype != 'uint8':
array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4))
array = array.astype('uint8')
# Eventually flip the image.
if origin == 'lower':
array = array[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + array[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack('!I', len(data)) +
chunk_head +
struct.pack('!I', 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
| 5,348,428 |
def prepare_url(url, source_url=None):
"""
    Operations that purify a URL: removing arguments, resolving
    redirects, and merging relative URLs with absolute ones.
"""
try:
if source_url is not None:
source_domain = urlparse(source_url).netloc
proper_url = urljoin(source_url, url)
proper_url = redirect_back(proper_url, source_domain)
# proper_url = remove_args(proper_url)
else:
# proper_url = remove_args(url)
proper_url = url
except ValueError as e:
log.critical('url %s failed on err %s' % (url, str(e)))
proper_url = ''
return proper_url
| 5,348,429 |
def str_contains_num_version_range_with_x(str):
"""
    Check if a string contains a range of version numbers, optionally ending in .x.
    :param str: the string to check.
    :return: True if the string contains such a version range, False otherwise.
"""
return bool(re.search(r'\d+((\.\d+)+)?(\.x)? < \d+((\.\d+)+)?(\.x)?', str))
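
# Two illustrative calls: the pattern looks for a "low < high" pair of dotted
# version numbers, each optionally ending in ".x".
print(str_contains_num_version_range_with_x("affects 2.1.x < 2.4.0"))  # True
print(str_contains_num_version_range_with_x("affects version 2.4.0"))  # False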
| 5,348,430 |
def get_args():
"""
gets cli args via the argparse module
"""
msg = "This script records cpu statistics"
# create an instance of parser from the argparse module
parser = argparse.ArgumentParser(description=msg)
# add expected arguments
parser.add_argument('-s', dest='silent', required=False,
action="store_true",
help="dont display statistics to screen")
parser.add_argument('-a', dest='append', required=False,
action="store_true",
help="dont overwrite previous files")
parser.add_argument('-c', dest='convert', required=False,
action="store_true",
help="converts data to human readable")
parser.add_argument('-n', dest='noheader', required=False,
action="store_true", help="dont write header")
parser.add_argument('-R', dest='refresh', required=False)
parser.add_argument('-r', dest='runtime', required=False)
parser.add_argument('-o', dest='outfile', required=False)
args = parser.parse_args()
    silent = args.silent
    noheader = args.noheader
    append = args.append
if args.refresh:
refresh = float(args.refresh)
else:
        # default refresh is 5 seconds
refresh = 5
if args.runtime:
runtime = float(args.runtime)
else:
# default runtime is eight hours
runtime = 28800
if args.outfile:
outfile = args.outfile
else:
outfile = 'memutil.csv'
    convert = args.convert
return silent, noheader, refresh, runtime, append, outfile, convert
| 5,348,431 |
def mpc_coro_ignore(
func: Callable[..., Coroutine[SecureElement, None, SecureElement]]
) -> Callable[..., SecureElement]:
"""
A wrapper for an MPC coroutine that ensures that the behaviour of the code is unaffected by
the type annotations.
:param func: The async function to be wrapped
:return: A placeholder for which a result will automatically be set when the coroutine has
finished running
"""
return mpc_coro(func, apply_program_counter_wrapper=False, ignore_type_hints=True)
| 5,348,432 |
def test_managed():
"""
Test to manage a memcached key.
"""
name = "foo"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
mock_t = MagicMock(side_effect=[CommandExecutionError, "salt", True, True, True])
with patch.dict(
memcached.__salt__, {"memcached.get": mock_t, "memcached.set": mock_t}
):
assert memcached.managed(name) == ret
comt = "Key 'foo' does not need to be updated"
ret.update({"comment": comt, "result": True})
assert memcached.managed(name, "salt") == ret
with patch.dict(memcached.__opts__, {"test": True}):
comt = "Value of key 'foo' would be changed"
ret.update({"comment": comt, "result": None})
assert memcached.managed(name, "salt") == ret
with patch.dict(memcached.__opts__, {"test": False}):
comt = "Successfully set key 'foo'"
ret.update(
{
"comment": comt,
"result": True,
"changes": {"new": "salt", "old": True},
}
)
assert memcached.managed(name, "salt") == ret
| 5,348,433 |
def is_all_in_one(config):
"""
Returns True if packstack is running allinone setup, otherwise
returns False.
"""
# Even if some host have been excluded from installation, we must count
# with them when checking all-in-one. MariaDB host should however be
# omitted if we are not installing MariaDB.
return len(filtered_hosts(config, exclude=False, dbhost=True)) == 1
| 5,348,434 |
def subset_prots_longest_cds(genes,proteins_path, path_out):
"""
    The gene-protein ID matching table generated with gffread is used here
    (it contains the length of the CDS) to infer the transcript with the longest CDS for each gene;
    this transcript is then written to a tmp fasta file.
    Args:
        genes: mapping of gene IDs to gene objects.
        proteins_path: path to the protein FASTA file.
        path_out: output path for the subset FASTA file.
    Returns:
        None
"""
# when matching prot ID to gene ID it is already checked for the one with the longest CDS
longest_transcripts = [gene.fasta_header for gene in genes.values() if gene.fasta_header]
logging.info("{} proteins written to fasta file for taxonomic assignment (subsetting for longest CDS)".format(len(longest_transcripts)))
subset_protein_fasta(proteins_path, longest_transcripts, path_out, "include")
| 5,348,435 |
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
if you need a simple wiki simply replace the two lines below with:
return auth.wiki()
"""
response.title = 'Award management'
data = {"message": "Put the award links here."}
return data
| 5,348,436 |
def session_generate(instanceAddress, appSecret): # pragma: no cover
"""
**Deprecated**
Issue a token to authenticate the user.
:param instanceAddress: Specify the misskey instance address.
:param appSecret: Specifies the secret key.
:type instanceAddress: str
:type appSecret: str
:rtype: dict
"""
res = requests.post(f"https://{instanceAddress}/api/auth/session/generate", data=json.dumps({'appSecret': appSecret}), headers={'content-type': 'application/json'})
if res.status_code != 200:
raise MisskeyAPIException('/auth/session/generate', 200, res.status_code, res.text)
else:
return json.loads(res.text)
| 5,348,437 |
def _dream_proposals( currentVectors, history, dimensions, nChains, DEpairs, gamma, jitter, eps ):
"""
generates and returns proposal vectors given the current states
"""
sampleRange = history.ncombined_history
currentIndex = np.arange(sampleRange - nChains,sampleRange)[:, np.newaxis]
combined_history = history.combined_history
#choose some chains without replacement to combine
chains = _random_no_replace(DEpairs * 2, sampleRange - 1, nChains)
# makes sure we have already selected the current chain so it is not replaced
    # this ensures that the two chosen chains cannot be the same as the chain for which the jump is proposed
chains += (chains >= currentIndex)
chainDifferences = (np.sum(combined_history[chains[:, 0:DEpairs], :], axis = 1) -
np.sum(combined_history[chains[:, DEpairs:(DEpairs*2)], :], axis = 1))
e = np.random.normal(0, jitter, (nChains,dimensions))
E = np.random.normal(0, eps,(nChains,dimensions)) # could replace eps with 1e-6 here
proposalVectors = currentVectors + (1 + e) * gamma[:,np.newaxis] * chainDifferences + E
return proposalVectors
| 5,348,438 |
def handle_compressed_file(
file_prefix: FilePrefix,
datatypes_registry,
ext: str = "auto",
tmp_prefix: Optional[str] = "sniff_uncompress_",
tmp_dir: Optional[str] = None,
in_place: bool = False,
check_content: bool = True,
) -> HandleCompressedFileResponse:
"""
Check uploaded files for compression, check compressed file contents, and uncompress if necessary.
Supports GZip, BZip2, and the first file in a Zip file.
For performance reasons, the temporary file used for uncompression is located in the same directory as the
input/output file. This behavior can be changed with the `tmp_dir` param.
``ext`` as returned will only be changed from the ``ext`` input param if the param was an autodetect type (``auto``)
and the file was sniffed as a keep-compressed datatype.
``is_valid`` as returned will only be set if the file is compressed and contains invalid contents (or the first file
in the case of a zip file), this is so lengthy decompression can be bypassed if there is invalid content in the
first 32KB. Otherwise the caller should be checking content.
"""
CHUNK_SIZE = 2**20 # 1Mb
is_compressed = False
compressed_type = None
keep_compressed = False
is_valid = False
filename = file_prefix.filename
uncompressed_path = filename
tmp_dir = tmp_dir or os.path.dirname(filename)
check_compressed_function = COMPRESSION_CHECK_FUNCTIONS.get(file_prefix.compressed_format)
if check_compressed_function:
is_compressed, is_valid = check_compressed_function(filename, check_content=check_content)
compressed_type = file_prefix.compressed_format
if is_compressed and is_valid:
if ext in AUTO_DETECT_EXTENSIONS:
# attempt to sniff for a keep-compressed datatype (observing the sniff order)
sniff_datatypes = filter(lambda d: getattr(d, "compressed", False), datatypes_registry.sniff_order)
sniffed_ext = run_sniffers_raw(file_prefix, sniff_datatypes)
if sniffed_ext:
ext = sniffed_ext
keep_compressed = True
else:
datatype = datatypes_registry.get_datatype_by_extension(ext)
keep_compressed = getattr(datatype, "compressed", False)
# don't waste time decompressing if we sniff invalid contents
if is_compressed and is_valid and file_prefix.auto_decompress and not keep_compressed:
assert compressed_type # Tell type checker is_compressed will only be true if compressed_type is also set.
with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as uncompressed:
with DECOMPRESSION_FUNCTIONS[compressed_type](filename) as compressed_file:
# TODO: it'd be ideal to convert to posix newlines and space-to-tab here as well
try:
for chunk in file_reader(compressed_file, CHUNK_SIZE):
if not chunk:
break
uncompressed.write(chunk)
except OSError as e:
os.remove(uncompressed.name)
raise OSError(
"Problem uncompressing {} data, please try retrieving the data uncompressed: {}".format(
compressed_type, util.unicodify(e)
)
)
finally:
is_compressed = False
uncompressed_path = uncompressed.name
if in_place:
# Replace the compressed file with the uncompressed file
shutil.move(uncompressed_path, filename)
uncompressed_path = filename
elif not is_compressed or not check_content:
is_valid = True
return HandleCompressedFileResponse(is_valid, ext, uncompressed_path, compressed_type, is_compressed)
| 5,348,439 |
def compress_r_params(r_params_dict):
"""
    Convert a dictionary of r_params to a compressed string format
Parameters
----------
r_params_dict: Dictionary
dictionary with parameters for weighting matrix. Proper fields
and formats depend on the mode of data_weighting.
data_weighting == 'dayenu':
dictionary with fields
'filter_centers', list of floats (or float) specifying the (delay) channel numbers
at which to center filtering windows. Can specify fractional channel number.
'filter_half_widths', list of floats (or float) specifying the width of each
filter window in (delay) channel numbers. Can specify fractional channel number.
'filter_factors', list of floats (or float) specifying how much power within each filter window
is to be suppressed.
Returns
-------
string containing r_params dictionary in json format and only containing one
copy of each unique dictionary with a list of associated baselines.
"""
if r_params_dict == {} or r_params_dict is None:
return ''
else:
r_params_unique = {}
r_params_unique_bls = {}
r_params_index = -1
for rp in r_params_dict:
#do not include data set in tuple key
already_in = False
for rpu in r_params_unique:
if r_params_unique[rpu] == r_params_dict[rp]:
r_params_unique_bls[rpu] += [rp,]
already_in = True
if not already_in:
r_params_index += 1
r_params_unique[r_params_index] = copy.copy(r_params_dict[rp])
r_params_unique_bls[r_params_index] = [rp,]
for rpi in r_params_unique:
r_params_unique[rpi]['baselines'] = r_params_unique_bls[rpi]
r_params_str = json.dumps(r_params_unique)
return r_params_str
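
# A hedged sketch of the behaviour: baselines that share identical weighting
# parameters collapse into a single JSON entry whose 'baselines' list names all
# of them. The baseline keys below are illustrative labels only.
r_params = {
    'bl_0_1': {'filter_centers': [0.0], 'filter_half_widths': [100e-9], 'filter_factors': [1e-9]},
    'bl_1_2': {'filter_centers': [0.0], 'filter_half_widths': [100e-9], 'filter_factors': [1e-9]},
}
print(compress_r_params(r_params))
# -> one entry keyed "0", carrying the shared parameters plus "baselines": ["bl_0_1", "bl_1_2"]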
| 5,348,440 |
def replace_ext(filename, oldext, newext):
"""Safely replaces a file extension new a new one"""
if filename.endswith(oldext):
return filename[:-len(oldext)] + newext
else:
raise Exception("file '%s' does not have extension '%s'" %
(filename, oldext))
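
# Simple illustration: swap a known suffix, and fail loudly on a mismatch.
print(replace_ext("report.txt", ".txt", ".md"))  # -> 'report.md'
# replace_ext("report.txt", ".csv", ".md")       # raises Exception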
| 5,348,441 |
def pp_table(operations, multiplies, n, num_stages):
"""Pretty prints a table describing the
calculations made during the pipeline."""
stage_titles = ["a"] + [f"Stage {i}" for i in range(num_stages)]
table = PrettyTable(stage_titles)
for row in range(n):
table_row = [f"{row}"]
for stage in range(num_stages):
lhs, op, mult_register = operations[stage][row]
_, rhs, phi_index = multiplies[stage][mult_register]
table_row.append(f"a[{lhs}] {op} a[{rhs}] * phis[{phi_index}]")
table.add_row(table_row)
table = table.get_string().split("\n")
table = [" ".join(("//", line)) for line in table]
print("\n".join(table))
| 5,348,442 |
def getAllArt():
"""
    1/ Verify whether the user is authenticated (logged in).
    2/ If so, they can post a new article; if not, they can only read articles.
"""
if request.method == "GET":
articles = actualArticle.getAll()
return articles
elif request.method == 'PUT':
if 'logged_in' in session:
response = actualArticle.crud(request, id)
articles = actualArticle.getAll()
return articles
else:
message="To add a new article you have to login"
return message
| 5,348,443 |
def test_dev_exception_logging(caplog: LogCaptureFixture) -> None:
"""Test that exceptions are properly logged in the development logger."""
configure_logging(name="myapp", profile="development", log_level="info")
logger = structlog.get_logger("myapp")
try:
raise ValueError("this is some exception")
except Exception:
logger.exception("exception happened", foo="bar")
assert caplog.record_tuples[0][0] == "myapp"
assert caplog.record_tuples[0][1] == logging.ERROR
assert "Traceback (most recent call last)" in caplog.record_tuples[0][2]
assert '"this is some exception"' in caplog.record_tuples[0][2]
| 5,348,444 |
def test_withStatementComplicatedTarget():
""" If the target of a statement uses any or all of the valid forms
for that part of the grammar
(See: http://docs.python.org/reference/compound_stmts.html#the-with-statement),
the names involved are checked both for definedness and any bindings
created are respected in the suite of the statement and afterwards.
"""
flakes('''
from __future__ import with_statement
c = d = e = g = h = i = None
with open('foo') as [(a, b), c[d], e.f, g[h:i]]:
a, b, c, d, e, g, h, i
a, b, c, d, e, g, h, i
''')
| 5,348,445 |
def update_ref(refname, newval, oldval):
"""Change the commit pointed to by a branch."""
if not oldval:
oldval = ''
assert(refname.startswith('refs/heads/'))
p = subprocess.Popen(['git', 'update-ref', refname,
newval.encode('hex'), oldval.encode('hex')],
preexec_fn = _gitenv)
_git_wait('git update-ref', p)
| 5,348,446 |
def normalize_citation(line: str) -> Union[Tuple[str, str], Tuple[None, str]]:
"""Normalize a citation string that might be a crazy URL from a publisher."""
warnings.warn("this function has been externalized to :func:`citation_url.parse`")
return citation_url.parse(line)
| 5,348,447 |
def cnn_predict_grid(data_in=None,
win_sizes=[((int(8), int(5)), 2, 1),((int(10), int(6)), 3, 2),((int(13), int(8)), 4, 3)],
problim = 0.95,
model_fpath=model_fpath,
scaler_fpath=scaler_fpath,
nc_fpath='D:/Master/data/cmems_data/global_10km/noland/phys_noland_2016_060.nc',
storedir=None):
""" Test the model using multiple sliding windows, there will be multiple returned predictions
data in: [lon,lat,x,y,ssl,uvel,vvel]
storedir: path to directory for storing image of predicted grid, if None, no image is stored"""
print("\n\n")
lon,lat,x,y,ssl,uvel,vvel = data_in
# Recreate the exact same model purely from the file
custom_objects = {
"f1_m": f1_m,
"precision_m": precision_m,
"recall_m": recall_m
}
clf = load_model(model_fpath, custom_objects=custom_objects)
scaler = joblib.load(scaler_fpath) # Import the std sklearn scaler model
nx, ny = ssl.shape
# Create canvas to show the cv2 rectangles around predictions
fig, ax = plt.subplots(figsize=(15, 12))
n=-1
color_array = np.sqrt(((uvel.T-n)/2)**2 + ((vvel.T-n)/2)**2)
# x and y needs to be equally spaced for streamplot
if not (same_dist_elems(x) or same_dist_elems(y)):
x, y = np.arange(len(x)), np.arange(len(y))
ax.contourf(x, y, ssl.T, cmap='rainbow', levels=150)
ax.streamplot(x, y, uvel.T, vvel.T, color=color_array, density=10)
#ax.quiver(x, y, uvel.T, vvel.T, scale=3)
fig.subplots_adjust(0,0,1,1)
fig.canvas.draw()
im = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
im = im.reshape(fig.canvas.get_width_height()[::-1] + (3,))
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
imH, imW, _ = imCopy.shape # col, row
winScaleW, winScaleH = imW*1.0/nx, imH*1.0/ny # Scalar coeff from dataset to cv2 image
# Define what variables are used as channel, if only uvel and vvel it should be [1,2]
to_be_scaled = [1,2]
data = [ssl, uvel, vvel]
# Holds rectangle coordinates with dataset and image indexes
cyc_r, acyc_r = [], []
cyc_r_im, acyc_r_im = [], []
print("++ Performing sliding window and predicting using pre-trained CNN model")
    # Loop over different window sizes; they will be resized down to the correct dimensions anyway
for wSize, wStep, hStep in win_sizes:
        # loop over the sliding window of indices
for rectIdx, (i, j, (xIdxs, yIdxs)) in enumerate(sliding_window(ssl, wStep, hStep, windowSize=wSize)):
if xIdxs[-1] >= nx or yIdxs[-1] >= ny:
continue
winW2, winH2 = winW*6, winH*6
winSize = (winH2, winW2)
masked = False # Continue if window hits land
data_window, data_scaled_window = [], []
for c in range(len(data)):
# Creates window, checks if masked, if not returns the window
a = check_window(data[c], xIdxs, yIdxs)
if a is None:
masked = True
break
# append window if not masked
data_window.append( a )
# Resize the original window to CNN input dim
data_window[c] = cv2.resize(data_window[c], dsize=(winSize), interpolation=cv2.INTER_CUBIC)
if c in to_be_scaled:
# Create a copy of window to be scaled
data_scaled_window.append(data_window[c].copy())
k = len(data_scaled_window) - 1
# Flatten array before applying scalar
data_scaled_window[k] = data_scaled_window[k].flatten()
# Scale the data
data_scaled_window[k] = scaler[k].transform([data_scaled_window[k]])[0]
# Reshape scaled data to original shape
data_scaled_window[k] = data_scaled_window[k].reshape(winW2, winH2)
# continue to next window if mask (land) is present
if masked: continue
# Transfrom input window to CNN input format
X_cnn = np.zeros((1,winW2,winH2,nChannels))
for lo in range(winW2): # Row
for la in range(winH2): # Column
for c in range(nChannels): # Channels
X_cnn[0,lo,la,c] = data_scaled_window[c][lo,la]
# Predict and receive probability
prob = clf.predict(X_cnn)
# This is the size of the current sliding window
nxWin, nyWin = len(xIdxs), len(yIdxs)
# y starts in top left for cv2, want it to be bottom left
xr, yr = int(winScaleW*(i)), int(winScaleH*(ny-j)) # rect coords
xrW, yrW= int(winScaleW*nxWin), int(winScaleH*nyWin) # rect width
# If either cyclone or acyclone are above probability limit, we have a prediction
if any(p >= problim for p in prob[0,1:]):
if prob[0,1] >= problim:
acyc_r.append([i, j, i + nxWin, j + nyWin])
acyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (217, 83, 25), 2)
#print('anti-cyclone | prob: {}'.format(prob[0,1]*100))
else:
cyc_r.append([i, j, i + nxWin, j + nyWin])
cyc_r_im.append([xr, yr, xr + xrW, yr - xrW])
cv2.rectangle(imCopy, (xr, yr), (xr + xrW, yr - xrW), (0, 76, 217), 2)
#print('cyclone | prob: {}'.format(prob[0,2]*100))
# We want to return both grouped and ungrouped predictions, in case user wants different grouping
    # Predictions need at least 2 rectangles with 20% overlap to be a final prediction
cyc_r_im_grouped, _ = cv2.groupRectangles(rectList=cyc_r_im, groupThreshold=1, eps=0.2)
acyc_r_im_grouped, _ = cv2.groupRectangles(rectList=acyc_r_im, groupThreshold=1, eps=0.2)
# if a store directory is defined, create and store an image of both grouped and ungrouped
# predicted grid at location
imgdir = 'C:/Users/47415/Master/images/compare/'
if isinstance(storedir, str):
if not os.path.isdir(imgdir + storedir):
os.makedirs(imgdir + storedir)
cv2.imwrite(imgdir + f'{storedir}/full_pred_grid.png', imCopy)
imCopy = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
draw_rectangles(imCopy, cyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'cyclone')
draw_rectangles(imCopy, acyc_r_im_grouped, lon, lat, winScaleW, winScaleH, 'anti-cyclone')
cv2.imwrite(imgdir + f'{storedir}/grouped_pred_grid.png', imCopy)
#cv2.imshow("Window", imCopy)
#cv2.waitKey(0)
plt.close(fig)
return cyc_r, acyc_r, cyc_r_im_grouped, acyc_r_im_grouped
| 5,348,448 |
def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):
"""
Connect to the OpenCloud Syndicate SMI, using the OpenCloud user credentials.
"""
debug = True
if hasattr(CONFIG, "DEBUG"):
debug = CONFIG.DEBUG
client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,
password=password,
user_pkey_pem=user_pkey_pem,
debug=debug )
return client
| 5,348,449 |
def test_docker_run_implies_container_method():
"""
If a value is given for the ``--docker-run`` argument then the method is
*container*.
"""
args = telepresence.cli.parse_args([
"--docker-run", "foo:latest", "/bin/bash"
])
assert args.method == "container"
| 5,348,450 |
def join(var, wrapper, message):
"""Either starts a new game of Werewolf or joins an existing game that has not started yet."""
# keep this and the event in fjoin() in sync
evt = Event("join", {
"join_player": join_player,
"join_deadchat": join_deadchat,
"vote_gamemode": vote_gamemode
})
if not evt.dispatch(var, wrapper, message, forced=False):
return
if var.PHASE in ("none", "join"):
if wrapper.private:
return
if var.ACCOUNTS_ONLY:
if wrapper.source.account is None:
wrapper.pm(messages["not_logged_in"])
return
if evt.data["join_player"](var, wrapper) and message:
evt.data["vote_gamemode"](var, wrapper, message.lower().split()[0], doreply=False)
else: # join deadchat
if wrapper.private and wrapper.source is not wrapper.target:
evt.data["join_deadchat"](var, wrapper.source)
| 5,348,451 |
def get_all_comb_pairs(M, b_monitor=False):
"""returns all possible combination pairs from M repeated measurements (M choose 2)
Args:
        M (int): number of repeated measurements
    Returns:
        indices1, indices2
"""
indices1 = np.zeros(int(M*(M-1)/2))
indices2 = np.zeros(int(M*(M-1)/2))
qq = 0
for q0 in range(M):
dt = q0+1
for q1 in range(M-q0-1):
indices1[qq] = q1
indices2[qq] = q1+dt
qq += 1
if b_monitor:
print("indices1:", indices1)
print("indices2:", indices2)
return (indices1, indices2)
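
# Illustrative call (numpy is assumed to be imported as np, as in the function
# body): for M = 4 measurements there are 6 unique pairs.
idx1, idx2 = get_all_comb_pairs(4)
print(idx1)  # [0. 1. 2. 0. 1. 0.]
print(idx2)  # [1. 2. 3. 2. 3. 3.]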
| 5,348,452 |
def gomc_sim_completed_properly(job, control_filename_str):
"""General check to see if the gomc simulation was completed properly."""
with job:
job_run_properly_bool = False
output_log_file = "out_{}.dat".format(control_filename_str)
if job.isfile(output_log_file):
# with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
with open(f"{output_log_file}", "r") as fp:
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "Move" in line:
split_move_line = line.split()
if (
split_move_line[0] == "Move"
and split_move_line[1] == "Type"
and split_move_line[2] == "Mol."
and split_move_line[3] == "Kind"
):
job_run_properly_bool = True
else:
job_run_properly_bool = False
return job_run_properly_bool
| 5,348,453 |
def test_teams_new_get():
"""Can a user get /teams/new"""
app = create_ctfd(user_mode="teams")
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/teams/new")
assert r.status_code == 200
destroy_ctfd(app)
| 5,348,454 |
def test_border():
"""Test border."""
app = MakeApp(srcdir='tests/marktest', copy_srcdir_to_tmpdir=True,
confoverrides={'sphinxmark_border': 'left'})
app.builder.build_all()
assert app.config.sphinxmark_border == 'left'
html = Path(path.join(app.outdir, htmlfile)).read_text()
assert htmlresult in html
css = Path(path.join(app.outdir, cssfile)).read_text()
assert ('border-left') in css
| 5,348,455 |
def debounce(timeout, **kwargs):
"""Use:
@debounce(text=lambda t: t.id, ...)
def on_message(self, foo=..., bar=..., text=None, ...)"""
keys = sorted(kwargs.items())
def wrapper(f):
@functools.wraps(f)
def handler(self, *args, **kwargs):
# Construct a tuple of keys from the input args
key = tuple(fn(kwargs.get(k)) for k, fn in keys)
curr = set()
if hasattr(self, '__debounce_curr'):
curr = self.__debounce_curr
prev = set()
if hasattr(self, '__debounce_prev'):
prev = self.__debounce_prev
now = time.time()
tick = time.time()
if hasattr(self, '__debounce_tick'):
tick = self.__debounce_tick
# Check the current and previous sets, if present
if key in curr or key in prev:
return
# Rotate and update
if now > tick:
prev = curr
curr = set()
tick = now + timeout
curr.add(key)
self.__debounce_curr = curr
self.__debounce_prev = prev
self.__debounce_tick = tick
# Call the wrapped function
return f(self, *args, **kwargs)
return handler
return wrapper
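
# A hedged usage sketch mirroring the docstring pattern: drop repeated messages
# carrying the same id that arrive within the timeout window. The Message and
# Handler classes here are illustrative only.
class Message:
    def __init__(self, id):
        self.id = id

class Handler:
    @debounce(30, text=lambda t: getattr(t, "id", None))
    def on_message(self, text=None):
        print("handling", text.id)

h = Handler()
h.on_message(text=Message(1))  # handled
h.on_message(text=Message(1))  # suppressed (same key seen within the window)
h.on_message(text=Message(2))  # handled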
| 5,348,456 |
def trainval(exp_dict, savedir, args):
"""
exp_dict: dictionary defining the hyperparameters of the experiment
savedir: the directory where the experiment will be saved
args: arguments passed through the command line
"""
# 2. Create data loader and model
train_loader = he.get_loader(
name=exp_dict["dataset"], split="train", datadir=os.path.dirname(savedir), exp_dict=exp_dict
)
model = he.get_model(name=exp_dict["model"], exp_dict=exp_dict)
# 3. load checkpoint
chk_dict = hw.get_checkpoint(savedir)
# 4. Add main loop
for epoch in tqdm.tqdm(range(chk_dict["epoch"], 10), desc="Running Experiment"):
# 5. train for one epoch
train_dict = model.train_on_loader(train_loader, epoch=epoch)
# 6. get and save metrics
score_dict = {"epoch": epoch, "acc": train_dict["train_acc"], "loss": train_dict["train_loss"]}
chk_dict["score_list"] += [score_dict]
images = model.vis_on_loader(train_loader)
hw.save_checkpoint(savedir, score_list=chk_dict["score_list"], images=[images])
print("Experiment done\n")
| 5,348,457 |
def test_disease_gene_example_dwwc(dwwc_method):
"""
Test the PC & DWWC computations in Figure 2D of Himmelstein & Baranzini
(2015) PLOS Comp Bio. https://doi.org/10.1371/journal.pcbi.1004259.g002
"""
graph = get_graph("disease-gene-example")
metagraph = graph.metagraph
# Compute GiGaD path count and DWWC matrices
metapath = metagraph.metapath_from_abbrev("GiGaD")
rows, cols, wc_matrix = dwwc(graph, metapath, damping=0, dwwc_method=dwwc_method)
rows, cols, dwwc_matrix = dwwc(
graph, metapath, damping=0.5, dwwc_method=dwwc_method
)
# Check row and column name assignment
assert rows == ["CXCR4", "IL2RA", "IRF1", "IRF8", "ITCH", "STAT3", "SUMO1"]
assert cols == ["Crohn's Disease", "Multiple Sclerosis"]
# Check concordance with https://doi.org/10.1371/journal.pcbi.1004259.g002
i = rows.index("IRF1")
j = cols.index("Multiple Sclerosis")
# Warning: the WC (walk count) and PC (path count) are only equivalent
# because none of the GiGaD paths contain duplicate nodes. Since, GiGaD
# contains duplicate metanodes, WC and PC are not guaranteed to be the
# same. However, they happen to be equivalent for this example.
assert wc_matrix[i, j] == 3
assert dwwc_matrix[i, j] == pytest.approx(0.25 + 0.25 + 32**-0.5)
| 5,348,458 |
def delete_local_group(self):
"""
Delete local group
:param self: MainController object
:return: None
"""
self.log('Delete local group by clicking on "DELETE GROUP" button...')
self.wait_until_visible(type=By.ID, element=popups.LOCAL_GROUP_DELETE_GROUP_BTN_ID).click()
self.log('... and clicking on "CONFIRM" button')
self.wait_until_visible(type=By.XPATH, element=popups.LOCAL_GROUP_DELETE_GROUP_CONFIRM_BTN_XPATH).click()
| 5,348,459 |
def isolated_add_event(event, quiet=True):
"""
Add an event object, but in its own transaction, not bound to an existing transaction scope
Returns a dict object of the event as was added to the system
:param event: event object
    :param quiet: boolean; if True, exceptions on event add are swallowed to prevent blocking the caller. If False, exceptions are raised
:return:
"""
with session_scope() as session:
return add_event_json(event.to_dict(), session, quiet)
| 5,348,460 |
def get_publishers():
""" Fetch and return all registered publishers."""
url = current_app.config['DATABASE']
with psycopg2.connect(url) as conn:
with conn.cursor() as cur:
cur.execute("SELECT * FROM userrole WHERE is_publisher = %s ORDER BY reg_date DESC;", ('true',))
res = cur.fetchall()
return res
| 5,348,461 |
def acs_map():
"""call after curses.initscr"""
# can this mapping be obtained from curses?
return {
ord(b'l'): curses.ACS_ULCORNER,
ord(b'm'): curses.ACS_LLCORNER,
ord(b'k'): curses.ACS_URCORNER,
ord(b'j'): curses.ACS_LRCORNER,
ord(b't'): curses.ACS_LTEE,
ord(b'u'): curses.ACS_RTEE,
ord(b'v'): curses.ACS_BTEE,
ord(b'w'): curses.ACS_TTEE,
ord(b'q'): curses.ACS_HLINE,
ord(b'x'): curses.ACS_VLINE,
ord(b'n'): curses.ACS_PLUS,
ord(b'o'): curses.ACS_S1,
ord(b's'): curses.ACS_S9,
ord(b'`'): curses.ACS_DIAMOND,
ord(b'a'): curses.ACS_CKBOARD,
ord(b'f'): curses.ACS_DEGREE,
ord(b'g'): curses.ACS_PLMINUS,
ord(b'~'): curses.ACS_BULLET,
ord(b','): curses.ACS_LARROW,
ord(b'+'): curses.ACS_RARROW,
ord(b'.'): curses.ACS_DARROW,
ord(b'-'): curses.ACS_UARROW,
ord(b'h'): curses.ACS_BOARD,
ord(b'i'): curses.ACS_LANTERN,
ord(b'p'): curses.ACS_S3,
ord(b'r'): curses.ACS_S7,
ord(b'y'): curses.ACS_LEQUAL,
ord(b'z'): curses.ACS_GEQUAL,
ord(b'{'): curses.ACS_PI,
ord(b'|'): curses.ACS_NEQUAL,
ord(b'}'): curses.ACS_STERLING,
}
| 5,348,462 |
def get_polymorphic_ancestors_models(ChildModel):
"""
    The inheritance chain of models that inherit from PolymorphicModel, including the model itself.
"""
ancestors = []
for Model in ChildModel.mro():
if isinstance(Model, PolymorphicModelBase):
if not Model._meta.abstract:
ancestors.append(Model)
return reversed(ancestors)
| 5,348,463 |
def plot_load_vs_fractional_freq_shift(all_data,ax=None):
"""
Plot fractional frequency shift as a function of load temperature for all resonators
"""
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 8))
    else:
        fig = ax.figure
for name, group in all_data.groupby('resonator_index'):
ax.plot(group.sweep_primary_load_temperature,group.fractional_delta_f_0,'.')
ax.grid()
ax.set_ylim(-2e-4,1e-5)
ax.set_ylabel('Fractional Frequency Shift')
ax.set_xlabel('Load Temperature (K)')
return fig
| 5,348,464 |
def cli(env):
"""Displays bandwidth pool information
    Similar to https://cloud.ibm.com/classic/network/bandwidth/vdr
"""
manager = AccountManager(env.client)
items = manager.get_bandwidth_pools()
table = formatting.Table([
"Id",
"Pool Name",
"Region",
"Servers",
"Allocation",
"Current Usage",
"Projected Usage"
], title="Bandwidth Pools")
table.align = 'l'
for item in items:
id_bandwidth = item.get('id')
name = item.get('name')
region = utils.lookup(item, 'locationGroup', 'name')
servers = manager.get_bandwidth_pool_counts(identifier=item.get('id'))
allocation = "{} GB".format(item.get('totalBandwidthAllocated', 0))
current = "{} GB".format(utils.lookup(item, 'billingCyclePublicBandwidthUsage', 'amountOut'))
projected = "{} GB".format(item.get('projectedPublicBandwidthUsage', 0))
table.add_row([id_bandwidth, name, region, servers, allocation, current, projected])
env.fout(table)
| 5,348,465 |
def encode_dist_anchor_free_np(gt_ctr, gt_offset, anchor_ctr, anchor_offset=None):
"""
3DSSD anchor-free encoder
:param:
gt_ctr: [bs, points_num, 3]
gt_offset: [bs, points_num, 3]
anchor_ctr: [bs, points_num, 3]
anchor_offset: [bs, points_num, 3]
:return:
encoded_ctr: [bs, points_num, 3]
encoded_offset: [bs, points_num, 3]
"""
target_ctr_half = gt_offset / 2.
# translate to center
padding_half_height = target_ctr_half[:, :, 1]
padding_zeros = np.zeros_like(padding_half_height)
padding_translate = np.stack([padding_zeros, padding_half_height, padding_zeros], axis=-1) # [bs, points_num, 3]
encoded_ctr = gt_ctr - padding_translate # to object center
encoded_ctr = encoded_ctr - anchor_ctr
return encoded_ctr, target_ctr_half
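
# A tiny numeric sketch (numpy assumed imported as np, as in the function body):
# one batch with a single point; the ground-truth centre is shifted down by half
# the box height and then expressed relative to the anchor.
gt_ctr = np.array([[[0.5, 1.0, 0.0]]])
gt_offset = np.array([[[1.0, 2.0, 1.0]]])     # box extents; index 1 is the height
anchor_ctr = np.array([[[0.0, 0.0, 0.0]]])
enc_ctr, half = encode_dist_anchor_free_np(gt_ctr, gt_offset, anchor_ctr)
print(enc_ctr)  # [[[0.5 0.  0. ]]]
print(half)     # [[[0.5 1.  0.5]]]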
| 5,348,466 |
def BCrand(h, hu, t, side, mean_h, amplitude, period, phase):
""" Conditions aux limites du modele direct, avec plus de paramètres"""
if side == 'L':
h[0] = mean_h + amplitude * np.sin((t * (2 * np.pi) / period) + phase)
hu[0] = 0.0
elif side == 'R':
h[-1] = h[-2]
hu[-1] = hu[-2] * 0.0
return [h] + [hu]
| 5,348,467 |
def dumpproc(stdout, stderr=None):
"""
print stdout/stderr of a process
"""
if stdout is not None and len(stdout) > 0:
print(" ", "=" * 20, "BEGIN STDOUT", "=" * 20)
for line in stdout.decode().splitlines():
print(" ", f"{Style.DIM}{line}{Style.RESET_ALL}")
print(" ", "=" * 20, "END STDOUT", "=" * 20)
if stderr is not None and len(stderr) > 0:
print(" ", "=" * 20, "BEGIN STDERR", "=" * 20)
for line in stderr.decode().splitlines():
print(" ", f"{Fore.RED}{line}{Style.RESET_ALL}")
print(" ", "=" * 20, "END STDERR", "=" * 20)
| 5,348,468 |
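One possible way to exercise dumpproc with a real child process; this sketch assumes colorama's Fore and Style are imported in the surrounding module, as the function requires.

import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "-c", "import sys; print('hello'); print('oops', file=sys.stderr)"],
    capture_output=True,
)
dumpproc(proc.stdout, proc.stderr)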
def pyplot(
figure=None,
scale: float = 0.8,
clear: bool = True,
aspect_ratio: typing.Union[list, tuple] = None
) -> str:
"""
:param figure:
:param scale:
:param clear:
:param aspect_ratio:
:return:
"""
environ.abort_thread()
from bs4 import BeautifulSoup
try:
from matplotlib import pyplot as mpl_pyplot
except Exception:
mpl_pyplot = None
if not figure:
figure = mpl_pyplot.gcf()
if aspect_ratio:
figure.set_size_inches(
aspect_ratio[0],
aspect_ratio[1]
)
else:
figure.set_size_inches(12, 8)
buffer = io.StringIO()
figure.savefig(
buffer,
format='svg',
dpi=300
)
buffer.seek(0)
svg_data = buffer.read()
if clear:
figure.clear()
soup = BeautifulSoup(svg_data, 'html.parser')
svg_tag = soup.find_all('svg')[0]
svg_tag['width'] = '100%'
svg_tag['height'] = '100%'
classes = svg_tag.get('class', '').strip().split(' ')
classes.append('cd-pylab-svg')
svg_tag['class'] = '\n'.join(classes)
styles = [
s for s in svg_tag.get('style', '').split(';')
if len(s.strip()) > 1
]
styles.append('max-height:{}vh;'.format(int(100.0 * scale)))
svg_tag['style'] = ';'.join(styles)
return '<div class="cd-pylab-plot">{}</div>'.format(soup.prettify())
| 5,348,469 |
def simplify(graph):
""" helper that simplifies the xy to mere node ids."""
d = {}
cnt = itertools.count(1)
c2 = []
for s, e, dst in graph.edges():
if s not in d:
d[s] = next(cnt)
if e not in d:
d[e] = next(cnt)
c2.append((d[s], d[e], dst))
g = Graph(from_list=c2)
return g
| 5,348,470 |
def delete(name: str) -> None:
"""
Delete a character.
:param name: the character's name
"""
con = kingdomsouls.database.connect()
with con:
try:
con.execute(
"""
DELETE FROM characters
WHERE name = ?
""",
(name,),
)
except sqlite3.Error as e:
click.echo(f"Database error: {e}")
except Exception as e:
click.echo(f"Exception in _query: {e}")
| 5,348,471 |
def _get_class_for(type):
"""Returns a :type:`class` corresponding to :param:`type`.
Used for getting a class from object type in JSON response. Usually, to
instantiate the Python object from response, this function is called in
the form of ``_get_class_for(data['object']).from_data(data)``.
:type type: str
:rtype: class
"""
return {
'account': Account,
'balance': Balance,
'bank_account': BankAccount,
'capability': Capability,
'card': Card,
'chain': Chain,
'charge': Charge,
'customer': Customer,
'dispute': Dispute,
'document': Document,
'event': Event,
'forex': Forex,
'link': Link,
'list': Collection,
'occurrence': Occurrence,
'receipt': Receipt,
'recipient': Recipient,
'refund': Refund,
'schedule': Schedule,
'search': Search,
'source': Source,
'token': Token,
'transfer': Transfer,
'transaction': Transaction,
}.get(type)
| 5,348,472 |
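The docstring describes a type-string dispatch pattern; here is a minimal standalone sketch of that idea using a hypothetical stand-in class rather than the real API objects.

class DemoAccount:
    @classmethod
    def from_data(cls, data):
        obj = cls()
        obj.__dict__.update(data)
        return obj

def _get_demo_class_for(type_name):
    return {"account": DemoAccount}.get(type_name)

data = {"object": "account", "id": "acct_123"}
instance = _get_demo_class_for(data["object"]).from_data(data)
print(type(instance).__name__, instance.id)  # DemoAccount acct_123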
def _query_jupyterhub_api(method, api_path, post_data=None):
"""Query Jupyterhub api
Detects Jupyterhub environment variables and makes a call to the Hub API
Parameters
----------
method : string
HTTP method, e.g. GET or POST
api_path : string
relative path, for example /users/
post_data : dict
JSON arguments for the API call
Returns
-------
response : dict
JSON response converted to dictionary
"""
hub_api_url = get_jupyterhub_api_url()
user = get_jupyterhub_user()
auth_header = get_jupyterhub_authorization()
api_path = api_path.format(authenticated_user=user)
req = requests.request(
url=hub_api_url + api_path,
method=method,
headers=auth_header,
json=post_data,
)
if not req.ok:
raise JupyterhubApiError("JupyterhubAPI returned a status code of: " + str(req.status_code) + " for api_path: " + api_path)
return req.json()
| 5,348,473 |
def PoolingOutputShape(input_shape, pool_size=(2, 2),
strides=None, padding='VALID'):
"""Helper: compute the output shape for the pooling layer."""
dims = (1,) + pool_size + (1,) # NHWC
spatial_strides = strides or (1,) * len(pool_size)
strides = (1,) + spatial_strides + (1,)
pads = convolution.PadtypeToPads(input_shape, dims, strides, padding)
operand_padded = onp.add(input_shape, onp.add(*zip(*pads)))
t = onp.floor_divide(onp.subtract(operand_padded, dims), strides) + 1
return tuple(t)
| 5,348,474 |
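A rough sanity check of the arithmetic above for 'VALID' padding (no pads), assuming an NHWC input: each spatial dimension becomes floor((size - window) / stride) + 1.

import numpy as onp

input_shape = (1, 28, 28, 3)   # NHWC
dims = (1, 2, 2, 1)            # 2x2 pooling window
strides = (1, 2, 2, 1)         # stride 2 in both spatial dimensions
out = tuple(onp.floor_divide(onp.subtract(input_shape, dims), strides) + 1)
print(out)  # (1, 14, 14, 3)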
def libre_office(odt_list, pdf_dir):
"""Convert a list of odt-files to pdf-files.
The input files are provided as a list of absolute paths,
<pdf_dir> is the absolute path to the output folder.
"""
# Use LibreOffice to convert the odt-files to pdf-files.
# If using the appimage, the paths MUST be absolute, so I use absolute
# paths "on principle".
# I don't know whether the program blocks until the conversion is complete
# (some versions don't), so it might be good to check that all the
# expected files have been generated (with a timeout in case something
# goes wrong?).
# The old problem that libreoffice wouldn't work headless if another
# instance (e.g. desktop) was running seems to be no longer the case,
# at least on linux.
def extern_out(line):
REPORT('OUT', line)
    rc, msg = run_extern(LIBREOFFICE, '--headless',
            '--convert-to', 'pdf',
            '--outdir', pdf_dir,
            *odt_list,
            feedback=extern_out
    )
| 5,348,475 |
def parse_config_file(config_file_path: Path) -> List[TabEntry]:
""" Parse the json config file, validate and convert to object structure """
app_config = None
Logger().info(f"Loading file '{config_file_path}'...")
if not config_file_path.is_file():
Logger().error(f"Config file '{config_file_path}' does not exist.")
return []
with open(str(config_file_path)) as fp:
try:
app_config = json.load(fp)
with open(this.base_path / "assets" / "config_schema.json") as schema_file:
json_schema = json.load(schema_file)
jsonschema.validate(instance=app_config, schema=json_schema)
except BaseException as error:
Logger().error(f"Config file:\n{str(error)}")
return []
# build the object model and update
tabs = []
for tab in app_config.get("tabs"):
tab_entry = TabEntry(tab.get("name"))
for app in tab.get("apps"):
# TODO: not very robust, but enough for small changes
update_app_info(app)
app_entry = AppEntry(app, config_file_path)
tab_entry.add_app_entry(app_entry)
tabs.append(tab_entry)
# auto Update version to next version:
app_config["version"] = json_schema.get("properties").get("version").get("enum")[-1]
# write it back with updates
with open(str(config_file_path), "w") as config_file:
json.dump(app_config, config_file, indent=4)
return tabs
| 5,348,476 |
def create_app():
""" 工厂函数 """
app = Flask(__name__)
register_blueprint(app)
# register_plugin(app)
register_filter(app)
register_logger()
return app
| 5,348,477 |
async def nasapod(ctx):
"""gives NASA's image of the day"""
await ctx.send(astrof())
| 5,348,478 |
def passwordbox(**kwargs):
"""
This wrapper is for making a dialog for changing your password.
It will return the old password, the new password, and a confirmation.
The remaining keywords are passed on to the autobox class.
"""
additional_fields = kwargs.get("additional_fields") and kwargs.pop("additional_fields") or []
title = kwargs.get("title_string", "Change your password")
header = kwargs.get("header_string") and kwargs.pop("header_string") or "Change your password"
default_fields = [
{"type" : "label", "label" : "First type your old password"},
{"name" : "old_password", "type" : "hidden_text", "label" : "Old Password: "},
{"type" : "label", "label": "Now enter your new password twice"},
{"name" : "new_password", "type" : "hidden_text", "label" : "New Password: "},
{"name" : "confirm_password", "type" : "hidden_text", "label" : "Confirm Password: "}
]
fields = default_fields + additional_fields
return autobox(fields = fields, title_string = title, header_string = header, **kwargs)
| 5,348,479 |
def unravel_hpx_index(idx, npix):
"""Convert flattened global map index to an index tuple.
Parameters
----------
idx : `~numpy.ndarray`
Flat index.
npix : `~numpy.ndarray`
Number of pixels in each band.
Returns
-------
idx : tuple of `~numpy.ndarray`
Index array for each dimension of the map.
"""
if npix.size == 1:
return tuple([idx])
dpix = np.zeros(npix.size, dtype="i")
dpix[1:] = np.cumsum(npix.flat[:-1])
bidx = np.searchsorted(np.cumsum(npix.flat), idx + 1)
pix = idx - dpix[bidx]
return tuple([pix] + list(np.unravel_index(bidx, npix.shape)))
| 5,348,480 |
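To illustrate the bookkeeping, a standalone sketch with a hypothetical three-band layout: searchsorted over the cumulative pixel counts picks the band, and the remainder is the pixel offset within that band.

import numpy as np

npix = np.array([4, 6, 2])                    # pixels per band
idx = np.array([0, 3, 4, 9, 10, 11])          # flat indices

cum = np.cumsum(npix)                         # [4, 10, 12]
band = np.searchsorted(cum, idx + 1)          # band holding each flat index
pix = idx - np.concatenate(([0], cum[:-1]))[band]
print(band)  # [0 0 1 1 2 2]
print(pix)   # [0 3 0 5 0 1]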
def install_apt_pkg():
"""
    Install my own useful packages, won't explain more about why :P
"""
apt_manager = AptManager()
apt_manager.commit()
all_pkg = ['nano', 'htop', 'tmux', 'vim', 'cmake',
'libncurses5-dev', 'libncursesw5-dev', 'git',
'tree', 'zip', 'expect', 'pigz', 'pv']
apt_manager.install_pkg(*all_pkg)
apt_manager.commit()
| 5,348,481 |
def map2sqldb(map_path, column_names, sep='\t'):
"""Determine the mean and 2std of the length distribution of a group
"""
table_name = os.path.basename(map_path).rsplit('.', 1)[0]
sqldb_name = table_name + '.sqlite3db'
sqldb_path = os.path.join(os.path.dirname(map_path), sqldb_name)
conn = sqlite3.connect(sqldb_path) # @UndefinedVariable
c = conn.cursor()
# If table already exist, return the connector and the table_name
SQL = '''
SELECT count(*) FROM sqlite_master WHERE name == \"{}\"
'''.format(table_name)
c.execute(SQL)
exists_flag = False
if c.fetchone()[0] == 1:
c.fetchall() #get rid of the remainder
exists_flag=True
if exists_flag:
return c, table_name
# Create table
SQL = '''
create table if not exists {0} ({1});
'''.format(table_name, '\"' + '\" text,\"'.join([str(n).lower() for n in column_names]) + '\" text')
c.execute(SQL)
c.close()
# Fill table
SQL = '''
insert into {0} values ({1})
'''.format(table_name, ' ,'.join(['?']*len(column_names)))
with open(map_path, 'r') as map_file:
csv.field_size_limit(2147483647)
csv_reader = csv.reader(map_file, delimiter=sep, quoting=csv.QUOTE_NONE)
with sqlite3.connect(sqldb_path) as conn: # @UndefinedVariable
c = conn.cursor()
c.executemany(SQL, csv_reader)
return c, table_name
| 5,348,482 |
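Hypothetical usage, assuming 'reads.map' is a tab-separated file whose columns match the names passed in; the returned cursor can then be queried against the generated table.

cursor, table_name = map2sqldb("reads.map", ["read_id", "length", "group"])
cursor.execute('SELECT count(*) FROM "{}"'.format(table_name))
print(cursor.fetchone()[0])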
def parse_revdep(value):
"""Value should be an atom, packages with deps intersecting that match."""
try:
targetatom = atom.atom(value)
except atom.MalformedAtom as e:
raise argparser.error(e)
val_restrict = values.FlatteningRestriction(
atom.atom,
values.AnyMatch(values.FunctionRestriction(targetatom.intersects)))
return packages.OrRestriction(*list(
packages.PackageRestriction(dep, val_restrict)
for dep in ('bdepend', 'depend', 'rdepend', 'pdepend')))
| 5,348,483 |
def is_context_word(model, word_a, word_b):
"""Calculates probability that both words appear in context with each
other by executing forward pass of model.
Args:
model (Mode): keras model
word_a (int): index of first word
word_b (int): index of second word
"""
# define inputs
input_a = np.zeros((1,))
input_b = np.zeros((1,))
input_a[0,] = word_a
input_b[0,] = word_b
# compute forward pass of model
prediction = model.predict_on_batch([input_a, input_b])
# retrieve value from tf tensor
prediction = prediction.numpy()[0][0]
return prediction
| 5,348,484 |
def map_and_save_gene_ids(hit_genes_location, all_detectable_genes_location=''):
"""
Maps gene names/identifiers into internal database identifiers (neo4j ids) and saves them
:param hit_genes_location: genes in the set we would like to analyse
:param all_detectable_genes_location: genes in the set that can be detected (background)
    :return: internal db ids for the primary hits, for the secondary hits, and for the background set
"""
standardized_hits = [] # [primary_set]
standardized_secondary_hits = [] # [secondary_set=None]
if type(hit_genes_location) == str or isinstance(hit_genes_location, pathlib.PurePath):
# log.info('codepath 1')
standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location)]
standardized_secondary_hits = [None]
if type(hit_genes_location) == tuple:
# log.info('codepath 2')
standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location[0])]
standardized_secondary_hits = [cast_external_refs_to_internal_ids(hit_genes_location[1])]
if type(hit_genes_location) == list:
# log.info('codepath 3')
for sub_hit_genes_location in hit_genes_location:
# log.info('codepath 3.0')
if type(sub_hit_genes_location) == str or isinstance(sub_hit_genes_location, pathlib.PurePath):
# log.info('codepath 3.1')
standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location)]
standardized_secondary_hits += [None]
if type(sub_hit_genes_location) == tuple:
# log.info('codepath 3.2')
standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[0])]
standardized_secondary_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[1])]
log.debug('standardized primary hits:\n\t%s' % standardized_hits)
log.debug('standardized secondary_hits:\n\t%s' % standardized_secondary_hits)
dump_object(Dumps.analysis_set_bulbs_ids, (standardized_hits, standardized_secondary_hits))
if all_detectable_genes_location:
background_set = cast_external_refs_to_internal_ids(all_detectable_genes_location)
# print(background_set)
primary_set = [y for x in standardized_hits for y in x] # flattens the mapped ids list
# print(primary_set)
formatted_secondary_hits = [_l
if _l is not None
else []
for _l in standardized_secondary_hits]
sec_set = [y for x in formatted_secondary_hits for y in x]
re_primary_set = set()
for _id in primary_set:
if type(_id) == str or type(_id) == int:
re_primary_set.add(_id)
else:
re_primary_set.add(_id[0])
primary_set = re_primary_set
re_secondary_set = set()
for _id in sec_set:
if type(_id) == str or type(_id) == int:
re_secondary_set.add(_id)
else:
re_secondary_set.add(_id[0])
        sec_set = re_secondary_set
if type(background_set[0]) == str or type(background_set[0]) == int: # unweighted
background_set = list(set(background_set).union(primary_set).union(sec_set))
else:
            bck_set = {_id[0] for _id in background_set}
if not primary_set.issubset(bck_set):
log.info('Nodes ids %s are missing in background set and are added with weight 0' %
(primary_set - bck_set))
background_set += [(_id, 0) for _id in (primary_set - bck_set)]
if not sec_set.issubset(bck_set):
log.info('Secondary set nodes ids %s are missing in background set and are added '
'with weight 0' % (sec_set - bck_set))
background_set += [(_id, 0) for _id in (sec_set - bck_set)]
else:
background_set = []
dump_object(Dumps.background_set_bulbs_ids, background_set)
return standardized_hits, standardized_secondary_hits, background_set
| 5,348,485 |
def next_method():
"""next, for: Get one item of an iterators."""
class _Iterator:
def __init__(self):
self._stop = False
def __next__(self):
if self._stop:
raise StopIteration()
self._stop = True
return "drums"
return next(_Iterator())
| 5,348,486 |
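A related note: next() also accepts a default value, which avoids the StopIteration that the iterator above raises once exhausted.

it = iter(["drums"])
print(next(it))             # 'drums'
print(next(it, "no more"))  # 'no more' instead of raising StopIteration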
def get_ingress_deployment(
serve_dag_root_node: DAGNode, pipeline_input_node: PipelineInputNode
) -> Deployment:
"""Return an Ingress deployment to handle user HTTP inputs.
Args:
serve_dag_root_node (DAGNode): Transformed as serve DAG's root. User
inputs are translated to serve_dag_root_node.execute().
pipeline_input_node (DAGNode): Singleton PipelineInputNode instance that
contains input preprocessor info.
Returns:
ingress (Deployment): Generated pipeline ingress deployment to serve
user HTTP requests.
"""
serve_dag_root_json = json.dumps(serve_dag_root_node, cls=DAGNodeEncoder)
preprocessor_import_path = pipeline_input_node.get_preprocessor_import_path()
serve_dag_root_deployment = serve.deployment(Ingress).options(
name=DEFAULT_INGRESS_DEPLOYMENT_NAME,
init_args=(
serve_dag_root_json,
preprocessor_import_path,
),
)
return serve_dag_root_deployment
| 5,348,487 |
def get_project_settings(project):
"""Gets project's settings.
Return value example: [{ "attribute" : "Brightness", "value" : 10, ...},...]
:param project: project name or metadata
:type project: str or dict
:return: project settings
:rtype: list of dicts
"""
if not isinstance(project, dict):
project = get_project_metadata_bare(project)
team_id, project_id = project["team_id"], project["id"]
params = {
"team_id": team_id,
}
response = _api.send_request(
req_type='GET', path=f'/project/{project_id}/settings', params=params
)
if not response.ok:
raise SABaseException(
response.status_code,
"Couldn't get project settings " + response.text
)
res = response.json()
for val in res:
if val['attribute'] == 'ImageQuality':
if val['value'] == 60:
val['value'] = 'compressed'
elif val['value'] == 100:
val['value'] = 'original'
else:
raise SABaseException(0, "NA ImageQuality value")
return res
| 5,348,488 |
def guard(M, test):
"""Monadic guard.
What it does::
return M.pure(Unit) if test else M.empty()
https://en.wikibooks.org/wiki/Haskell/Alternative_and_MonadPlus#guard
"""
return M.pure(Unit) if test else M.empty()
| 5,348,489 |
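A minimal sketch of how guard is typically used, with a toy Maybe-style class standing in for M; the class and the Unit value below are hypothetical stand-ins, not part of the library above.

Unit = ()

class Maybe:
    def __init__(self, value, is_just):
        self.value, self.is_just = value, is_just

    @classmethod
    def pure(cls, value):
        return cls(value, True)

    @classmethod
    def empty(cls):
        return cls(None, False)

def guard_demo(M, test):
    return M.pure(Unit) if test else M.empty()

print(guard_demo(Maybe, 3 > 2).is_just)  # True  -> the computation continues
print(guard_demo(Maybe, 3 < 2).is_just)  # False -> the computation short-circuits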
def get_git_hash() -> str:
"""Get the git hash."""
rv = _run("git", "rev-parse", "HEAD")
if rv is None:
return "UNHASHED"
return rv
| 5,348,490 |
def primary_style():
""" a blue green style """
return color_mapping(
'bg:#449adf #ffffff',
'bg:#002685 #ffffff',
'#cd1e10',
'#007e3a',
'#fe79d1',
'#4cde77',
'#763931',
'#64d13e',
'#7e77d2',
'bg:#000000 #ffffff',
)
| 5,348,491 |
def test_add_index_with_string_list():
"""test index add with list of string(64)."""
header = ShardHeader()
schema_json = {"id": {"type": "number"}, "name": {"type": "string"},
"label": {"type": "string"}, "key": {"type": "string"}}
schema = header.build_schema(schema_json, ["key"], "schema_desc")
header.add_schema(schema)
ret = header.add_index_fields(["id", "label"])
assert ret == SUCCESS
| 5,348,492 |
def decrypt_files(rsa_key):
"""
Decrypt all encrypted files on host machine
`Required`
:param str rsa_key: RSA private key in PEM format
"""
try:
if not isinstance(rsa_key, Crypto.PublicKey.RSA.RsaKey):
rsa_key = Crypto.PublicKey.RSA.importKey(rsa_key)
if not rsa_key.has_private():
return "Error: RSA key cannot decrypt"
globals()['threads']['iter_files'] = _iter_files(rsa_key)
globals()['threads']['decrypt_files'] = _threader()
return "Decrypting files"
except Exception as e:
util.log("{} error: {}".format(decrypt_files.__name__, str(e)))
| 5,348,493 |
def _bivariate_uc_uc(
lhs,rhs,
z,
dz_dl, # (dz_re_dl_re, dz_re_dl_im, dz_im_dl_re, dz_im_dl_im)
dz_dr # (dz_re_dr_re, dz_re_dr_im, dz_im_dr_re, dz_im_dr_im)
):
"""
Create an uncertain complex number as a bivariate function
This is a utility method for implementing mathematical
functions of uncertain complex numbers.
The parameters 'lhs' and 'rhs' are the UncertainComplex
arguments to the function, 'z' is the complex value of the
function and 'dz_dl' and 'dz_dr' are the Jacobian matrices
of the function value z with respect to the real and imaginary
components of the function's left and right arguments.
Parameters
----------
lhs, rhs : :class:`UncertainComplex`
z : complex
dz_dl, dz_dr : 4-element sequence of float
Returns
-------
:class:`UncertainComplex`
"""
lhs_r = lhs.real
lhs_i = lhs.imag
rhs_r = rhs.real
rhs_i = rhs.imag
u_lhs_real, u_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._u_components,(dz_dl[0],dz_dl[2]),
lhs_i._u_components,(dz_dl[1],dz_dl[3])
)
u_rhs_real, u_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._u_components,(dz_dr[0],dz_dr[2]),
rhs_i._u_components,(dz_dr[1],dz_dr[3])
)
d_lhs_real, d_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._d_components,(dz_dl[0],dz_dl[2]),
lhs_i._d_components,(dz_dl[1],dz_dl[3])
)
d_rhs_real, d_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._d_components,(dz_dr[0],dz_dr[2]),
rhs_i._d_components,(dz_dr[1],dz_dr[3])
)
i_lhs_real, i_lhs_imag = vector.merge_weighted_vectors_twice(
lhs_r._i_components,(dz_dl[0],dz_dl[2]),
lhs_i._i_components,(dz_dl[1],dz_dl[3])
)
i_rhs_real, i_rhs_imag = vector.merge_weighted_vectors_twice(
rhs_r._i_components,(dz_dr[0],dz_dr[2]),
rhs_i._i_components,(dz_dr[1],dz_dr[3])
)
return UncertainComplex(
UncertainReal(
z.real,
vector.merge_vectors(
u_lhs_real, u_rhs_real
),
vector.merge_vectors(
d_lhs_real, d_rhs_real
),
vector.merge_vectors(
i_lhs_real, i_rhs_real
)
),
UncertainReal(
z.imag,
vector.merge_vectors(
u_lhs_imag,u_rhs_imag
),
vector.merge_vectors(
d_lhs_imag,d_rhs_imag
),
vector.merge_vectors(
i_lhs_imag, i_rhs_imag
)
)
)
| 5,348,494 |
def smoothing_filter(time_in, val_in, time_out=None, relabel=None, params=None):
"""
@brief Smoothing filter with relabeling and resampling features.
@details It supports evenly sampled multidimensional input signal.
Relabeling can be used to infer the value of samples at
time steps before and after the explicitly provided samples.
As a reminder, relabeling is a generalization of periodicity.
@param[in] time_in Time steps of the input signal (1D numpy array)
@param[in] val_in Sampled values of the input signal
(2D numpy array: row = sample, column = time)
@param[in] time_out Time steps of the output signal (1D numpy array)
@param[in] relabel Relabeling matrix (identity for periodic signals)
Optional: Disable if omitted
@param[in] params Parameters of the filter. Dictionary with keys:
                          'mixing_ratio_1': Relative time at the beginning of the signal
                                            during which the output signal corresponds to a
                                            linear mixing over time of the filtered and
                                            original signal. (only used if relabel is omitted)
                          'mixing_ratio_2': Relative time at the end of the signal
                                            during which the output signal corresponds to a
                                            linear mixing over time of the filtered and
                                            original signal. (only used if relabel is omitted)
                          'smoothness'[0]: Smoothing factor to filter the beginning of the signal
(only used if relabel is omitted)
'smoothness'[1]: Smoothing factor to filter the end of the signal
(only used if relabel is omitted)
'smoothness'[2]: Smoothing factor to filter the middle part of the signal
@return Filtered signal (2D numpy array: row = sample, column = time)
"""
if time_out is None:
time_out = time_in
if params is None:
params = dict()
params['mixing_ratio_1'] = 0.12
params['mixing_ratio_2'] = 0.04
params['smoothness'] = [0.0,0.0,0.0]
params['smoothness'][0] = 5e-3
params['smoothness'][1] = 5e-3
params['smoothness'][2] = 3e-3
if relabel is None:
mix_fit = [None,None,None]
mix_fit[0] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_1']*((t-time_in[0])/(time_in[-1]-time_in[0]))*np.pi-np.pi/2))
mix_fit[1] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_2']*((t-(1-params['mixing_ratio_2'])*time_in[-1])/(time_in[-1]-time_in[0]))*np.pi+np.pi/2))
mix_fit[2] = lambda t: 1
val_fit = []
for jj in range(val_in.shape[0]):
val_fit_jj = []
for kk in range(len(params['smoothness'])):
val_fit_jj.append(UnivariateSpline(time_in, val_in[jj], s=params['smoothness'][kk]))
val_fit.append(val_fit_jj)
time_out_mixing = [None, None, None]
time_out_mixing_ind = [None, None, None]
time_out_mixing_ind[0] = time_out < time_out[-1]*params['mixing_ratio_1']
time_out_mixing[0] = time_out[time_out_mixing_ind[0]]
time_out_mixing_ind[1] = time_out > time_out[-1]*(1-params['mixing_ratio_2'])
time_out_mixing[1] = time_out[time_out_mixing_ind[1]]
time_out_mixing_ind[2] = np.logical_and(np.logical_not(time_out_mixing_ind[0]), np.logical_not(time_out_mixing_ind[1]))
time_out_mixing[2] = time_out[time_out_mixing_ind[2]]
val_out = np.zeros((val_in.shape[0],len(time_out)))
for jj in range(val_in.shape[0]):
for kk in range(len(time_out_mixing)):
val_out[jj,time_out_mixing_ind[kk]] = \
(1 - mix_fit[kk](time_out_mixing[kk])) * val_fit[jj][kk](time_out_mixing[kk]) + \
mix_fit[kk](time_out_mixing[kk]) * val_fit[jj][-1](time_out_mixing[kk])
else:
time_tmp = np.concatenate([time_in[:-1]-time_in[-1],time_in,time_in[1:]+time_in[-1]])
val_in_tmp = np.concatenate([relabel.dot(val_in[:,:-1]),val_in,relabel.dot(val_in[:,1:])], axis=1)
val_out = np.zeros((val_in.shape[0],len(time_out)))
for jj in range(val_in_tmp.shape[0]):
f = UnivariateSpline(time_tmp, val_in_tmp[jj], s=params['smoothness'][-1])
val_out[jj] = f(time_out)
return val_out
| 5,348,495 |
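A minimal usage sketch on a synthetic one-row signal with the default parameters; it assumes numpy and scipy's UnivariateSpline are imported as the function expects.

import numpy as np

t = np.linspace(0.0, 1.0, 200)
noisy = np.sin(2 * np.pi * t) + 0.05 * np.random.randn(200)
val_in = noisy.reshape(1, -1)        # row = signal component, column = time step
smoothed = smoothing_filter(t, val_in)
print(smoothed.shape)                # (1, 200)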
async def test_rgb_light_custom_effect_via_service(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
"""Test an rgb light with a custom effect set via the service."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},
unique_id=MAC_ADDRESS,
)
config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
with _patch_discovery(device=bulb), _patch_wifibulb(device=bulb):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.az120444_aabbccddeeff"
state = hass.states.get(entity_id)
assert state.state == STATE_ON
attributes = state.attributes
assert attributes[ATTR_BRIGHTNESS] == 128
assert attributes[ATTR_COLOR_MODE] == "rgbw"
assert attributes[ATTR_EFFECT_LIST] == [*FLUX_EFFECT_LIST]
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["color_temp", "hs", "rgbw"]
assert attributes[ATTR_HS_COLOR] == (0, 100)
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turnOff.assert_called_once()
bulb.is_on = False
async_fire_time_changed(hass, utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
await hass.services.async_call(
DOMAIN,
"set_custom_effect",
{
ATTR_ENTITY_ID: entity_id,
CONF_COLORS: [[0, 0, 255], [255, 0, 0]],
CONF_SPEED_PCT: 30,
CONF_TRANSITION: "jump",
},
blocking=True,
)
bulb.setCustomPattern.assert_called_with([(0, 0, 255), (255, 0, 0)], 30, "jump")
bulb.setCustomPattern.reset_mock()
| 5,348,496 |
def get_selector_qty(*args):
"""get_selector_qty() -> int"""
return _idaapi.get_selector_qty(*args)
| 5,348,497 |
def get(
host: str,
path: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
stream: bool = False,
) -> requests.Response:
"""
Send a GET request to the remote API.
"""
return do_request(
"GET",
host,
path,
params=params,
headers=headers,
authenticated=authenticated,
stream=stream,
)
| 5,348,498 |
def test_find_outermost_module_name():
"""
    This test intends to ensure that the utility function `find_module_name` will
find the correct outermost module name for a given filepath by default
"""
# given
package_path = build_path("test_find_outermost_module_name", "some_package")
inner_module_path = join_paths(package_path, "inner_module")
filepath = join_paths(inner_module_path, "foo", "bar.py")
sys.path.append(parent_path(package_path))
sys.path.append(parent_path(inner_module_path))
expected_module_name = "some_package.inner_module.foo.bar"
# when
module_name = find_module_name(filepath)
# then
assert module_name == expected_module_name
| 5,348,499 |