content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---|
def build_package(name: str) -> None:
"""Build it."""
package_dir = CURR_PATH.joinpath(name)
assert package_dir.exists()
assert package_dir.joinpath("PKGBUILD").exists()
print(f"\tFound directory and PKGBUILD")
subprocess.run(["makepkg", "-si", "--noconfirm"], cwd=package_dir)
package_files = [x for x in package_dir.glob("*.pkg.tar.*")]
assert len(package_files) != 0
print(f"\t{len(package_files)} packages have been produced.")
for pkg in package_files:
move(pkg, BUILD_PATH / pkg.name)
| 5,348,300 |
def get_module_version(module_name: str) -> str:
"""Check module version. Raise exception when not found."""
version = None
if module_name == "onnxrt":
module_name = "onnxruntime"
command = [
"python",
"-c",
f"import {module_name} as module; print(module.__version__)",
]
proc = Proc()
proc.run(args=command)
if proc.is_ok:
for line in proc.output:
version = line.strip()
proc.remove_logs()
if version is None:
raise ClientErrorException(f"Could not found version of {module_name} module.")
return version
| 5,348,301 |
def _post(url, data):
"""RESTful API post (insert to database)
Parameters
----------
url: str
Address for the conftrak server
data: dict
Entries to be inserted to database
"""
r = requests.post(url,
data=ujson.dumps(data))
r.raise_for_status()
return r.json()
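
# A minimal usage sketch (hypothetical endpoint and payload, not from the source; it needs a
# running conftrak server plus the `requests` and `ujson` imports used by _post above):
if __name__ == "__main__":
    new_sample = {"uid": "abc123", "name": "sample-01"}
    inserted = _post("http://localhost:7770/sample", new_sample)
    print(inserted)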
| 5,348,302 |
def curry(arity_or_fn=None, ignore_kwargs=False, evaluator=None, *args, **kw):
"""
Creates a function that accepts one or more arguments of a function and
either invokes func returning its result if at least arity number of
arguments have been provided, or returns a function that accepts the
remaining function arguments until the function arity is satisfied.
This function is overloaded: you can pass a function or coroutine function
as first argument or an `int` indicating the explicit function arity.
Function arity can be inferred via function signature or explicitly
passed via `arity_or_fn` param.
    You can optionally ignore keyword based arguments as well by passing the
    `ignore_kwargs` param with `True` value.
This function can be used as decorator.
Arguments:
arity_or_fn (int|function|coroutinefunction): function arity to curry
or function to curry.
ignore_kwargs (bool): ignore keyword arguments as arity to satisfy
during curry.
evaluator (function): use a custom arity evaluator function.
*args (mixed): mixed variadic arguments for partial function
application.
        **kwargs (mixed): keyword variadic arguments for partial function
application.
Raises:
TypeError: if function is not a function or a coroutine function.
Returns:
function or coroutinefunction: function will be returned until all the
function arity is satisfied, where a coroutine function will be
returned instead.
Usage::
# Function signature inferred function arity
@paco.curry
async def task(x, y, z=0):
return x * y + z
await task(4)(4)(z=8)
# => 24
# User defined function arity
@paco.curry(4)
async def task(x, y, *args, **kw):
return x * y + args[0] * args[1]
await task(4)(4)(8)(8)
# => 80
# Ignore keyword arguments from arity
@paco.curry(ignore_kwargs=True)
async def task(x, y, z=0):
return x * y
await task(4)(4)
# => 16
"""
def isvalidarg(x):
return all([
x.kind != x.VAR_KEYWORD,
x.kind != x.VAR_POSITIONAL,
any([
not ignore_kwargs,
ignore_kwargs and x.default == x.empty
])
])
def params(fn):
return inspect.signature(fn).parameters.values()
def infer_arity(fn):
return len([x for x in params(fn) if isvalidarg(x)])
def merge_args(acc, args, kw):
_args, _kw = acc
_args = _args + args
_kw = _kw or {}
_kw.update(kw)
return _args, _kw
def currier(arity, acc, fn, *args, **kw):
"""
        Function either continues currying the arguments
        or executes the function once the desired arguments have been collected.
If function curried is variadic then execution without arguments
will finish curring and trigger the function
"""
# Merge call arguments with accumulated ones
_args, _kw = merge_args(acc, args, kw)
# Get current function call accumulated arity
current_arity = len(args)
# Count keyword params as arity to satisfy, if required
if not ignore_kwargs:
current_arity += len(kw)
# Decrease function arity to satisfy
arity -= current_arity
# Use user-defined custom arity evaluator strategy, if present
currify = evaluator and evaluator(acc, fn)
# If arity is not satisfied, return recursive partial function
if currify is not False and arity > 0:
return functools.partial(currier, arity, (_args, _kw), fn)
        # If arity is satisfied, instantiate the coroutine and return it
return fn(*_args, **_kw)
def wrapper(fn, *args, **kw):
if not iscallable(fn):
            raise TypeError('paco: first argument must be a coroutine function, '
                            'a function or a method.')
# Infer function arity, if required
arity = (arity_or_fn if isinstance(arity_or_fn, int)
else infer_arity(fn))
# Wraps function as coroutine function, if needed.
fn = wraps(fn) if isfunc(fn) else fn
# Otherwise return recursive currier function
return currier(arity, (args, kw), fn, *args, **kw) if arity > 0 else fn
# Return currier function or decorator wrapper
return (wrapper(arity_or_fn, *args, **kw)
if iscallable(arity_or_fn)
else wrapper)
| 5,348,303 |
def add_to_sys_path(path):
"""Add a path to the system PATH."""
sys.path.insert(0, path)
| 5,348,304 |
def plot_means_std(means, std, list_samples, prev_w=10, nxt_w=10,
figsize=(6, 4)):
"""
Plot mean and standard deviation from the accuracies of each state of each
mouse.
Parameters
----------
means : dict
dictionary containing all the stage changes (e.g. '1-2', '2-3'...) and
the accuracies associated to each change.
std : dict
dictionary containing all the stage changes (e.g. '1-2', '2-3'...) and
the standard deviations associated to each change.
    list_samples : list
        number of samples (N) for each stage change, shown in the subplot titles.
    prev_w: int
        previous window size (default value: 10)
    nxt_w: int
        next window size (default value: 10)
Returns
-------
Plot of the mean and standard deviation of each stage for all the subjects
"""
if len(means) == 5:
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=figsize,
gridspec_kw={'wspace': 0.5, 'hspace': 0.5})
elif len(means) == 8:
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=figsize,
gridspec_kw={'wspace': 0.5, 'hspace': 0.5})
ax = ax.flatten()
fig.suptitle('Mean Accuracy of changes', fontsize='x-large')
xs = np.arange(-prev_w, nxt_w)
for i_k, (key, val) in enumerate(means.items()):
ax[i_k].errorbar(xs, val, std[key], label=key)
ax[i_k].set_ylim(0.5, 1)
ax[i_k].set_title(key + ' (N='+str(list_samples[i_k])+')')
ax[i_k].axvline(0, color='black', linestyle='--')
# Hide the right and top spines
ax[i_k].spines['right'].set_visible(False)
ax[i_k].spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax[i_k].yaxis.set_ticks_position('left')
ax[i_k].xaxis.set_ticks_position('bottom')
if len(means) == 5:
if i_k in [0, 3]:
ax[i_k].set_ylabel('Mean accuracy')
if i_k in [3, 4]:
ax[i_k].set_xlabel('Sessions after stage change')
elif len(means) == 8:
if i_k in [0, 4]:
ax[i_k].set_ylabel('Mean accuracy')
if i_k in [4, 5, 6, 7]:
ax[i_k].set_xlabel('Trials after stage change')
if len(means) == 5:
sv_fig(fig, 'Mean Accuracy of changes for 3 stages')
elif len(means) == 8:
sv_fig(fig, 'Mean Accuracy of changes for 4 stages')
| 5,348,305 |
def filter(args):
"""
%prog filter fastafile 100
Filter the FASTA file to contain records with size >= or <= certain cutoff.
"""
p = OptionParser(filter.__doc__)
p.add_option(
"--less",
default=False,
action="store_true",
help="filter the sizes < certain cutoff [default: >=]",
)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, cutoff = args
try:
cutoff = int(cutoff)
except ValueError:
sys.exit(not p.print_help())
f = Fasta(fastafile, lazy=True)
fw = must_open(opts.outfile, "w")
for name, rec in f.iteritems_ordered():
if opts.less and len(rec) >= cutoff:
continue
if (not opts.less) and len(rec) < cutoff:
continue
SeqIO.write([rec], fw, "fasta")
fw.flush()
return fw.name
| 5,348,306 |
def fin_forecast(ratio1, ratio2, sp_df):
"""used to forecast 3 years of financial forecast/projection
"""
print("print test line 6")
forecast = MCSimulation(
portfolio_data = sp_df,
weights = [ratio1, ratio2],
num_simulation = 500,
num_trading_days = 252*3
)
print("test line 3")
print(forecast.portfolio_data.head())
simulation = forecast.portfolio_data
#return ratio1, ratio2, sp_df
return simulation
| 5,348,307 |
def load_uci_regression_dataset(name,
split_seed,
train_fraction=0.9,
data_dir="uci_datasets"):
"""Load a UCI dataset from an npz file.
Ported from
https://github.com/wjmaddox/drbayes/blob/master/experiments/uci_exps/bayesian_benchmarks/data.py.
"""
path = os.path.join(data_dir,
_UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])
data_arr = onp.load(path)
x, y = data_arr["x"], data_arr["y"]
indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))
indices = onp.asarray(indices)
x, y = x[indices], y[indices]
n_train = int(train_fraction * len(x))
x_train, y_train = x[:n_train], y[:n_train]
x_test, y_test = x[n_train:], y[n_train:]
def normalize_with_stats(arr, arr_mean=None, arr_std=None):
return (arr - arr_mean) / arr_std
def normalize(arr):
eps = 1e-6
arr_mean = arr.mean(axis=0, keepdims=True)
arr_std = arr.std(axis=0, keepdims=True) + eps
return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std
x_train, x_mean, x_std = normalize(x_train)
y_train, y_mean, y_std = normalize(y_train)
x_test = normalize_with_stats(x_test, x_mean, x_std)
y_test = normalize_with_stats(y_test, y_mean, y_std)
data_info = {"y_scale": float(y_std)}
return (x_train, y_train), (x_test, y_test), data_info
| 5,348,308 |
def test_check_inputs(
student_names, project_names, supervisor_names, capacities, seed, clean
):
""" Test that inputs to an instance of SA can be verified. """
_, _, _, game = make_game(
student_names, project_names, supervisor_names, capacities, seed, clean
)
with warnings.catch_warnings(record=True) as w:
game.check_inputs()
assert not w
assert game.students == game._all_students
assert game.projects == game._all_projects
assert game.supervisors == game._all_supervisors
| 5,348,309 |
def implied_volatility(price, S, K, t, r, q, flag):
"""Calculate the Black-Scholes-Merton implied volatility.
:param S: underlying asset price
:type S: float
:param K: strike price
:type K: float
    :param price: the option price
    :type price: float
:param t: time to expiration in years
:type t: float
:param r: risk-free interest rate
:type r: float
:param q: annualized continuous dividend rate
:type q: float
:param flag: 'c' or 'p' for call or put.
:type flag: str
>>> S = 100
>>> K = 100
>>> sigma = .2
>>> r = .01
>>> flag = 'c'
>>> t = .5
>>> q = .02
>>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)
>>> implied_volatility(price, S, K, t, r, q, flag)
0.20000000000000018
>>> flac = 'p'
>>> sigma = 0.3
>>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)
>>> price
8.138101080183894
>>> implied_volatility(price, S, K, t, r, q, flag)
0.30000000000000027
"""
f = lambda sigma: price - black_scholes_merton(flag, S, K, t, r, sigma, q)
return brentq(
f,
a=1e-12,
b=100,
xtol=1e-15,
rtol=1e-15,
maxiter=1000,
full_output=False
)
| 5,348,310 |
def clean_hotel_maxpersons(string):
"""
"""
if string is not None:
        r = int(re.findall(r'\d+', string)[0])
else:
r = 0
return r
| 5,348,311 |
def merge_nodes(nodes):
"""
    Merge nodes to deduplicate same-name nodes and add a "parents"
    attribute to each unique node: a dict mapping parent names to parent Node objects.
"""
def add_parent(unique_node, parent):
if getattr(unique_node, 'parents', None):
if parent.name not in unique_node.parents:
unique_node.parents[parent.name] = parent
else:
unique_node.parents = {parent.name: parent}
names = OrderedDict()
for node in nodes:
if node.name not in names:
names[node.name] = node
add_parent(names[node.name], node.parent)
else:
add_parent(names[node.name], node.parent)
return names.values()
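
# A small self-contained usage sketch of merge_nodes. The _Node class below is a
# hypothetical stand-in for the caller's node type; only .name and .parent are assumed,
# and OrderedDict must already be imported for merge_nodes itself.
class _Node:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

_root, _other = _Node("root"), _Node("other")
_merged = list(merge_nodes([_Node("a", _root), _Node("a", _other)]))
# One node named "a" remains; its .parents dict maps {"root": _root, "other": _other}.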
| 5,348,312 |
def offset_add(OFF_par1, OFF_par2, OFF_par3, logpath=None, outdir=None, shellscript=None):
"""
| Add range and azimuth offset polynomial coefficients
| Copyright 2008, Gamma Remote Sensing, v1.1 12-Feb-2008 clw
Parameters
----------
OFF_par1:
(input) ISP offset/interferogram parameter file
OFF_par2:
(input) ISP offset/interferogram parameter file
OFF_par3:
(output) ISP offset/interferogram parameter file with sums of the
range and azimuth offset polynomials in OFF_par1 and OFF_par2
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
process(['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/offset_add', OFF_par1, OFF_par2, OFF_par3], logpath=logpath,
outdir=outdir, shellscript=shellscript)
| 5,348,313 |
def getVPCs(account="*", region="*", debug=False, save=False):
"""Retrieve all data on VPCs from an AWS account and optionally save
data on each to a file"""
print("Collecting VPC data...", file=sys.stderr)
vpclist = {}
for vpc in skew.scan("arn:aws:ec2:%s:%s:vpc/*" % (region, account)):
if debug:
msg = "Account: %s, VPCID: %s, Region: %s, CIDR: %s" % (
vpc._client.account_id, vpc.id, vpc._client.region_name,
vpc.data['CidrBlock'])
print(msg, file=sys.stderr)
try:
tags = vpc.data['Tags']
except KeyError:
tags = []
vpclist[vpc.id] = {"CIDR": vpc.data['CidrBlock'],
"Account": vpc._client.account_id,
"Region": vpc._client.region_name,
"Tags": tags,
"Instances":[]}
vpclist['none'] = {"Instances":[]}
print("Collected information on %s VPCs" % len(vpclist), file=sys.stderr)
return vpclist
| 5,348,314 |
def get_cache_timeout():
"""Returns timeout according to COOLOFF_TIME."""
cache_timeout = None
cool_off = settings.AXES_COOLOFF_TIME
if cool_off:
if isinstance(cool_off, (int, float)):
cache_timeout = timedelta(hours=cool_off).total_seconds()
else:
cache_timeout = cool_off.total_seconds()
return cache_timeout
| 5,348,315 |
def find_records(dataset, search_string):
"""Retrieve records filtered on search string.
Parameters:
dataset (list): dataset to be searched
search_string (str): query string
Returns:
list: filtered list of records
"""
records = [] # empty list (accumulator pattern)
for record in dataset:
if search_string.lower() in record.lower(): # case insensitive
records.append(record) # add to new list
return records
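
# Quick usage sketch with an in-memory dataset (illustrative values only):
devices = ["Pixel 7", "iPhone 14", "Galaxy S22", "Pixel 6a"]
matches = find_records(devices, "pixel")
# matches == ['Pixel 7', 'Pixel 6a']  (case-insensitive substring match)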
| 5,348,316 |
def state_changed(name, address, value, group):
"""Capture the state change."""
_LOGGER.info("Device %s state %d changed to 0x%02x", address, group, value)
| 5,348,317 |
def mood(sentence, **kwargs):
""" Returns IMPERATIVE (command), CONDITIONAL (possibility), SUBJUNCTIVE (wish) or INDICATIVE (fact).
"""
if isinstance(sentence, basestring):
try:
# A Sentence is expected but a string given.
# Attempt to parse the string on-the-fly.
from pattern.en import parse, Sentence
sentence = Sentence(parse(sentence))
except ImportError:
pass
if imperative(sentence, **kwargs):
return IMPERATIVE
if conditional(sentence, **kwargs):
return CONDITIONAL
if subjunctive(sentence, **kwargs):
return SUBJUNCTIVE
else:
return INDICATIVE
| 5,348,318 |
def create_script(*args, **kwargs):
"""Similar to create_file() but will set permission to 777"""
mode = kwargs.pop("mode", 777)
path = create_file(*args, **kwargs)
path.chmod(mode)
return path
| 5,348,319 |
def unique(x, dim=None):
"""Unique elements of x and indices of those unique elements
https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810
e.g.
unique(tensor([
[1, 2, 3],
[1, 2, 4],
[1, 2, 3],
[1, 2, 5]
]), dim=0)
=>
tensor([0, 1, 3])
"""
unique, inverse = torch.unique(
x, sorted=True, return_inverse=True, dim=dim)
perm = torch.arange(inverse.size(0), dtype=inverse.dtype,
device=inverse.device)
inverse, perm = inverse.flip([0]), perm.flip([0])
return inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm)
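
# Usage sketch reproducing the docstring example (requires PyTorch):
import torch

_x = torch.tensor([
    [1, 2, 3],
    [1, 2, 4],
    [1, 2, 3],
    [1, 2, 5],
])
_first_idx = unique(_x, dim=0)
# _first_idx == tensor([0, 1, 3]): for each sorted unique row, the index of its
# first occurrence in _x.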
| 5,348,320 |
def _convert_arg(val, name, type, errmsg=None):
""" Convert a Python value in CPO and check its value
Args:
val: Value to convert
name: Argument name
type: Expected type
errmsg: Optional error message
"""
val = build_cpo_expr(val)
assert val.is_kind_of(type), errmsg if errmsg is not None else "Argument '{}' should be a {}".format(name, type.get_public_name())
return val
| 5,348,321 |
def fit_gamma_dist(dataset, model, target_dir):
"""
- Fit gamma distribution for anomaly scores.
- Save the parameters of the distribution.
"""
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=CONFIG["training"]["batch_size"],
shuffle=False,
drop_last=False,
)
# fit gamma distribution for anomaly scores
gamma_params = calc_anomaly_score(model=model, data_loader=data_loader)
score_file_path = "{model}/score_distr_{machine_type}.pkl".format(
model=CONFIG["model_directory"], machine_type=os.path.split(target_dir)[1]
)
# save the parameters of the distribution
joblib.dump(gamma_params, score_file_path)
| 5,348,322 |
def into_json(struct):
"""
    Serialize a named tuple into a nicely indented JSON string.
"""
return json.dumps(_compile(struct), indent=2)
| 5,348,323 |
def atan(*args, **kwargs):
"""
atan(x)
Return the arc tangent (measured in radians) of x.
    This function has been overridden from math.atan to work element-wise on iterables
"""
pass
| 5,348,324 |
def required_input(data_input, status_code):
"""check for required fields"""
    if data_input not in request.get_json():
abort(status_code, "field {0} is required".format(data_input))
| 5,348,325 |
def generate_integer_seeds(signature):
"""Generates a set of seeds. Each seed
is supposed to be included as a file for AFL.
**NOTE** that this method assumes that the signature
only have int types. If a non int type is found,
None is returned.
Param:
(str) signature: fuzzable method signature
Return:
(list) of seeds to include in the analysis
"""
primitives = {'int', 'float', 'long', 'double'}
sm = SootMethod(signature)
param_types = sm.get_parameter_types()
#for param_type in param_types:
#if param_type not in primitives:
# return None
# seeds = itertools.product(_generate_integer_parameter_seed(),
# repeat=len(param_types))
seed = [str(random.randint(-2048, 2048)) for _ in param_types]
return ["\x07".join(seed)]
#return list(map(lambda x: "\x07".join(x), seeds))
| 5,348,326 |
def adjust_time(hour: int, minute: int) -> Tuple[int, int]:
"""Adjust time from sunset using offset in config.
Returns:
Tuple[int, int]: (hour, minute) of the adjusted time
"""
today = pendulum.today().at(hour, minute)
today = today.add(hours=config.OFFSET_H, minutes=config.OFFSET_M)
hour = today.hour
minute = today.minute
message = f'Scripts will run at around {hour:02}:{minute:02}'
config.LOGGER.info(message)
return (today.hour, today.minute)
| 5,348,327 |
def read_data_unet(path_images, path_masks, img_dims, norm, stretch, shuffle):
"""
    Load image/mask pairs from two folders, then optionally contrast-stretch, shuffle and normalize them.
    :param path_images: path of the folder containing the images [string]
    :param path_masks: path of the folder containing the masks [string]
    :param img_dims: image dimensions ('rows', 'cols', 'depth') [dict]
    :param norm: is normalization to the range of [0, 1] required [bool]
    :param stretch: is contrast stretch to the range of [0, 255] required [bool]
    :param shuffle: shuffle the (image, mask) pairs before returning [bool]
    :return: (images, masks) - arrays of images and binarized masks [numpy arrays]
    """
    images = []  # array of normalized multidimensional distance maps (frequencies: 20 - 120MHz)
masks = []
im_names = [fname for fname in os.listdir(path_images) if fname[-4:] == '.jpg'] # image and corresponding mask have the same filename
mask_names = [fname for fname in os.listdir(path_masks) if fname[-4:] == '.jpg'] # image and corresponding mask have the same filename
for ind, im_name in enumerate(im_names):
# load image
im = cv2.imread(os.path.join(path_images, im_name), 0)
if stretch:
im = cv2.normalize(im, im, 0, 255, cv2.NORM_MINMAX)
images.append(im)
# load mask
mask = cv2.imread(os.path.join(path_masks, mask_names[ind]), 0)
masks.append(mask)
if len(images) == 0:
print("No images were read.")
exit(101)
if shuffle:
data = list(zip(images, masks))
random.shuffle(data)
images, masks = zip(*data)
# convert data to ndarrays
images = np.array(images)
images = np.reshape(images, (len(images), img_dims['rows'], img_dims['cols'], img_dims['depth']))
masks = np.array(masks)
masks = np.reshape(masks, (len(masks), img_dims['rows'], img_dims['cols'], 1))
# binarize masks
masks[masks > 0] = 1
masks[masks <= 0] = 0
if norm:
images = images / 255.0
return images, masks
| 5,348,328 |
def indent(elem, level=0):
"""
Recursive function to indent an ElementTree._ElementInterface
used for pretty printing. Code from
U{http://www.effbot.org/zone/element-lib.htm}. To use run indent
on elem and then output in the normal way.
@param elem: element to be indented. will be modified.
@type elem: ElementTree._ElementInterface
@param level: level of indentation for this element
@type level: nonnegative integer
@rtype: ElementTree._ElementInterface
@return: Contents of elem indented to reflect its structure
"""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
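
# Usage sketch: pretty-print a small ElementTree document in place.
import xml.etree.ElementTree as ET

_root = ET.Element("config")
ET.SubElement(_root, "item", name="alpha")
ET.SubElement(_root, "item", name="beta")
indent(_root)
print(ET.tostring(_root, encoding="unicode"))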
| 5,348,329 |
def flatten(l):
"""Flatten 2 list
"""
return reduce(lambda x, y: list(x) + list(y), l, [])
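
# Usage sketch (`reduce` must be imported from functools on Python 3 for flatten itself):
from functools import reduce

flatten([[1, 2], (3, 4), [5]])
# -> [1, 2, 3, 4, 5]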
| 5,348,330 |
def _rep_t_s(byte4):
"""
    Composite permutation T', composed of the non-linear transformation and the linear transformation L'
"""
    # Non-linear transformation
b_array = _non_linear_map(_byte_unpack(byte4))
    # Linear transformation L'
return _linear_map_s(_byte_pack(b_array))
| 5,348,331 |
def get_mapping():
"""
Returns a dictionary with the mapping of Spacy dependency labels to a numeric value, spacy dependency annotations
can be found here https://spacy.io/api/annotation
:return: dictionary
"""
keys = ['acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp',
'clf', 'compound', 'conj', 'cop', 'csubj', 'csubjpass', 'dative', 'dep', 'det', 'discourse', 'dislocated',
'dobj', 'expl', 'fixed', 'flat', 'goeswith', 'iobj', 'intj', 'list', 'mark',
'meta', 'neg', 'nn', 'nmod', 'nounmod', 'npadvmod', 'npmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'obj', 'obl',
'orphan', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj', 'predet', 'prep', 'prt', 'punct', 'quantmod', 'relcl',
'reparandum', 'root', 'vocative', 'xcomp', '']
values = list(range(2, len(keys) + 2))
assert len(keys) == len(values)
return dict(zip(keys, values))
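
# Usage sketch: numeric ids start at 2 and follow the order of the label list above.
_dep_to_id = get_mapping()
# _dep_to_id["acl"] == 2, _dep_to_id["acomp"] == 3, and every label maps to a distinct integer.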
| 5,348,332 |
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
mean = tf.reduce_mean(input_tensor=var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(
input_tensor=tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(input_tensor=var))
tf.summary.scalar('min', tf.reduce_min(input_tensor=var))
tf.summary.histogram('histogram', var)
| 5,348,333 |
def create_root_folder(path: str, name: str) -> int:
"""
Creates a root folder if folder not in database.
Fetches id if folder already in database.
Handles paths with both slash and backslash as separator.
:param path: The path to the folder in the users file system.
:param name: The name of the folder.
:return: The id of the folder.
"""
path = path.replace('\\', '/')
f = Folder.objects.get_or_create(path=path, name=name)[0]
return f.id
| 5,348,334 |
def get_document_term_matrix(all_documents):
""" Counts word occurrences by document. Then transform it into a
document-term matrix (dtm).
Returns a Tf-idf matrix, first count word occurrences by document.
This is transformed into a document-term matrix (dtm). This is also
just called a term frequency matrix.
:param all_documents:
:return:
"""
tfidf_vectorizer = TfidfVectorizer(stop_words='english',
ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(all_documents)
terms = tfidf_vectorizer.get_feature_names()
logging.info('Total terms found: %d' % len(terms))
logging.info('(TFM/DTM) Matrix size: %s' % (tfidf_matrix.shape,))
return terms, tfidf_matrix
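
# Usage sketch with a tiny corpus (requires scikit-learn and the logging/TfidfVectorizer
# imports used above; note that newer scikit-learn versions renamed get_feature_names
# to get_feature_names_out):
_docs = [
    "the cat sat on the mat",
    "the dog chased the cat",
    "dogs and cats are pets",
]
_terms, _tfidf = get_document_term_matrix(_docs)
# _tfidf.shape == (3, len(_terms)); _terms holds the 1- to 3-gram features kept
# after English stop-word removal.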
| 5,348,335 |
def f_raw2(coordinate, packedParams):
"""
    The raw function call; performs no validation of the parameters.
:return:
"""
gaussParams = packedParams['pack']
res = 0
for p in gaussParams:
res += gaussian_2d.f_noravel(coordinate, *p)
return res
| 5,348,336 |
def example_serving_input_fn():
"""Build the serving inputs."""
example_bytestring = tf.placeholder(
shape=[None],
dtype=tf.string,
)
features = tf.parse_example(
example_bytestring,
tf.feature_column.make_parse_example_spec(INPUT_COLUMNS))
return tf.estimator.export.ServingInputReceiver(
features, {'example_proto': example_bytestring})
| 5,348,337 |
def emptyentries(targets, headers, dirnames):
""" Write genes with no candidate at all to log file """
t = dirnames[1].split('TempFolder')[1]
nocandidate = []
for i in range(len(targets)):
if not targets[i]:
nocandidate.append(i)
if len(nocandidate):
with open(os.path.join(dirnames[0], 'log_' + t + '.txt'), 'a') as f:
f.write('%d Gene(s) have no probe candidates:\n'% len(nocandidate))
for i in nocandidate:
f.write('%s\n' % headers[i])
| 5,348,338 |
def add_file_uri_to_path(filepath):
"""Add the file uri preix: "file://" to the beginning of a path"""
if not filepath:
return False, "The filepath must be specified"
if filepath.lower().startswith(FILE_URI_PREFIX):
        # The prefix is already present; return the path unchanged
        return True, filepath
updated_fpath = '%s%s' % (FILE_URI_PREFIX, filepath)
return True, updated_fpath
| 5,348,339 |
def test_dsm_compute_arg(
compute_dsm_default_args,
): # pylint: disable=redefined-outer-name
"""
Cars compute_dsm arguments test with default and degraded cases
"""
parser = cars_parser()
with tempfile.TemporaryDirectory(dir=temporary_dir()) as directory:
compute_dsm_default_args.outdir = directory
# test with default args
main_cli(compute_dsm_default_args, parser, dry_run=True)
# test with mp mode (multiprocessing)
args_mode_mp = copy(compute_dsm_default_args)
args_mode_mp.mode = "mp"
main_cli(args_mode_mp, parser, dry_run=True)
# test [xmin, ymin, xmax, ymax] roi argument
args_roi_bbox = copy(compute_dsm_default_args)
args_roi_bbox.roi_bbox = ["1.0", "2.0", "3.0", "4.0"]
main_cli(args_roi_bbox, parser, dry_run=True)
# test image roi argument
args_roi_file = copy(compute_dsm_default_args)
args_roi_file.roi_file = absolute_data_path(
"input/cars_input/roi_image.tif"
)
main_cli(args_roi_file, parser, dry_run=True)
# test vector roi argument
args_roi_file.roi_file = absolute_data_path(
"input/cars_input/roi_vector.gpkg"
)
main_cli(args_roi_file, parser, dry_run=True)
# degraded cases input jsons
args_bad_jsons = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_jsons.injsons = [
absolute_data_path("input/cars_input/test.txt")
]
main_cli(args_bad_jsons, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
with pytest.raises(SystemExit) as exit_error:
args_bad_jsons.injsons = []
main_cli(args_bad_jsons, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases sigma
args_bad_sigma = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_sigma.sigma = -10
main_cli(args_bad_sigma, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases dsm radius
args_bad_dsm_radius = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_dsm_radius.dsm_radius = -10
main_cli(args_bad_dsm_radius, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases resolution
args_bad_resolution = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_resolution.resolution = 0
main_cli(args_bad_resolution, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
args_bad_resolution = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_resolution.resolution = -1
main_cli(args_bad_resolution, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases epsg
args_bad_epsg = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_epsg.epsg = -1
main_cli(args_bad_epsg, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases input ROI file
args_bad_roi_file = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_roi_file.roi_file = absolute_data_path(
"input/cars_input/test.txt"
)
main_cli(args_bad_roi_file, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
with pytest.raises(SystemExit) as exit_error:
args_bad_roi_file.roi_file = absolute_data_path(
"input/phr_ventoux/preproc_output/content.json"
)
main_cli(args_bad_roi_file, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
with pytest.raises(SystemExit) as exit_error:
args_bad_roi_file.roi_file = absolute_data_path(
"input/phr_ventoux/left_image.tif"
)
main_cli(args_bad_roi_file, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases correlator config file
args_bad_correlator_conf = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_correlator_conf.corr_config = absolute_data_path(
"input/cars_input/test.txt"
)
main_cli(args_bad_correlator_conf, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases elevation offsets
args_bad_elevation_offsets = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_elevation_offsets.min_elevation_offset = 10
args_bad_elevation_offsets.max_elevation_offset = -10
main_cli(args_bad_elevation_offsets, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases log level
args_bad_loglevel = copy(compute_dsm_default_args)
with pytest.raises(ValueError):
args_bad_loglevel.loglevel = "TEST"
main_cli(args_bad_loglevel, parser, dry_run=True)
# degraded cases number of workers
args_bad_nb_workers = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_nb_workers.nb_workers = -1
main_cli(args_bad_nb_workers, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
# degraded cases wall time
args_bad_wall_time = copy(compute_dsm_default_args)
with pytest.raises(SystemExit) as exit_error:
args_bad_wall_time.walltime = "000:00:00"
main_cli(args_bad_wall_time, parser, dry_run=True)
assert exit_error.type == SystemExit
assert exit_error.value.code == 1
| 5,348,340 |
def np_slope_diff_spacing(z, xspace, yspace):
"""
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
Provides slope in degrees.
"""
dy, dx = np.gradient(z, xspace, yspace)
return np.arctan(np.sqrt(dx*dx+dy*dy))*180/np.pi
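
# Sanity-check sketch: a plane rising 1 unit per 1 unit of grid spacing has a 45 degree slope.
import numpy as np

_z = np.tile(np.arange(5, dtype=float), (5, 1))  # _z[i, j] = j
_slopes = np_slope_diff_spacing(_z, 1.0, 1.0)
# _slopes is approximately 45.0 at every grid point.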
| 5,348,341 |
def build_dist(
cfg, feat_1, feat_2=None, dist_m=None, verbose=False,
):
"""Computes distance.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix. (optional)
Returns:
numpy.ndarray: distance matrix.
"""
if dist_m is None:
dist_m = cfg.dist_metric
if dist_m == "euclidean":
if feat_2 is not None:
return compute_euclidean_distance(feat_1, feat_2, cfg.dist_cuda)
else:
return compute_euclidean_distance(feat_1, feat_1, cfg.dist_cuda)
elif dist_m == "cosine":
if feat_2 is not None:
return compute_cosine_distance(feat_1, feat_2, cfg.dist_cuda)
else:
return compute_cosine_distance(feat_1, feat_1, cfg.dist_cuda)
elif dist_m == "jaccard":
if feat_2 is not None:
feat = torch.cat((feat_1, feat_2), dim=0)
else:
feat = feat_1
dist = compute_jaccard_distance(
feat, k1=cfg.k1, k2=cfg.k2, search_option=cfg.search_type, verbose=verbose,
)
if feat_2 is not None:
return dist[: feat_1.size(0), feat_1.size(0) :]
else:
return dist
else:
assert "Unknown distance metric: {}".format(dist_m)
| 5,348,342 |
def write_features(
netcdf_file_name, feature_matrix, target_values, num_classes,
append_to_file=False):
"""Writes features (activations of intermediate layer) to NetCDF file.
:param netcdf_file_name: Path to output file.
:param feature_matrix: numpy array of features. Must have >= 2 dimensions,
where the first dimension (length E) represents examples and the last
dimension represents channels (transformed input variables).
:param target_values: length-E numpy array of target values. Must all be
integers in 0...(K - 1), where K = number of classes.
:param num_classes: Number of classes.
:param append_to_file: Boolean flag. If True, will append to existing file.
If False, will create new file.
"""
error_checking.assert_is_boolean(append_to_file)
error_checking.assert_is_numpy_array(feature_matrix)
num_storm_objects = feature_matrix.shape[0]
dl_utils.check_target_array(
target_array=target_values, num_dimensions=1, num_classes=num_classes)
error_checking.assert_is_numpy_array(
target_values, exact_dimensions=numpy.array([num_storm_objects]))
if append_to_file:
error_checking.assert_is_string(netcdf_file_name)
netcdf_dataset = netCDF4.Dataset(
netcdf_file_name, 'a', format='NETCDF3_64BIT_OFFSET')
prev_num_storm_objects = len(numpy.array(
netcdf_dataset.variables[TARGET_VALUES_KEY][:]))
netcdf_dataset.variables[FEATURE_MATRIX_KEY][
prev_num_storm_objects:(prev_num_storm_objects + num_storm_objects),
...
] = feature_matrix
netcdf_dataset.variables[TARGET_VALUES_KEY][
prev_num_storm_objects:(prev_num_storm_objects + num_storm_objects)
] = target_values
else:
file_system_utils.mkdir_recursive_if_necessary(
file_name=netcdf_file_name)
netcdf_dataset = netCDF4.Dataset(
netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
netcdf_dataset.setncattr(NUM_CLASSES_KEY, num_classes)
netcdf_dataset.createDimension(STORM_OBJECT_DIMENSION_KEY, None)
netcdf_dataset.createDimension(
FEATURE_DIMENSION_KEY, feature_matrix.shape[1])
num_spatial_dimensions = len(feature_matrix.shape) - 2
tuple_of_dimension_keys = (STORM_OBJECT_DIMENSION_KEY,)
for i in range(num_spatial_dimensions):
netcdf_dataset.createDimension(
SPATIAL_DIMENSION_KEYS[i], feature_matrix.shape[i + 1])
tuple_of_dimension_keys += (SPATIAL_DIMENSION_KEYS[i],)
tuple_of_dimension_keys += (FEATURE_DIMENSION_KEY,)
netcdf_dataset.createVariable(
FEATURE_MATRIX_KEY, datatype=numpy.float32,
dimensions=tuple_of_dimension_keys)
netcdf_dataset.variables[FEATURE_MATRIX_KEY][:] = feature_matrix
netcdf_dataset.createVariable(
TARGET_VALUES_KEY, datatype=numpy.int32,
dimensions=STORM_OBJECT_DIMENSION_KEY)
netcdf_dataset.variables[TARGET_VALUES_KEY][:] = target_values
netcdf_dataset.close()
| 5,348,343 |
def call_inverse_cic_single_omp(img_in,yc1,yc2,yi1,yi2,dsi):
"""
Input:
img_in: Magnification Map
yc1, yc2: Lens position
yi1, yi2: Source position
dsi: pixel size on grid
"""
ny1,ny2 = np.shape(img_in)
img_in = np.array(img_in,dtype=ct.c_float)
yi1 = np.array(yi1,dtype=ct.c_float)
yi2 = np.array(yi2,dtype=ct.c_float)
nlimgs = len(yi1)
img_out = np.zeros((nlimgs),dtype=ct.c_float)
rtf.inverse_cic_omp_single(img_in,yi1,yi2,ct.c_float(yc1),ct.c_float(yc2),ct.c_float(dsi),ct.c_int(ny1),ct.c_int(ny2),ct.c_int(nlimgs),img_out)
return img_out
| 5,348,344 |
def filter_spans(spans):
"""Filter a sequence of spans and remove duplicates or overlaps. Useful for
creating named entities (where one token can only be part of one entity) or
when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
longest span is preferred over shorter spans.
spans (iterable): The spans to filter.
RETURNS (list): The filtered spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
result = []
seen_tokens = set()
for span in sorted_spans:
# Check for end - 1 here because boundaries are inclusive
if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
result.append(span)
seen_tokens.update(range(span.start, span.end))
result = sorted(result, key=lambda span: span.start)
return result
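
# Usage sketch with spaCy (blank English pipeline, overlapping spans):
import spacy

_nlp = spacy.blank("en")
_doc = _nlp("New York City is in New York State")
filter_spans([_doc[0:3], _doc[0:2], _doc[5:8]])
# -> [New York City, New York State]: the longer "New York City" span wins over "New York".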
| 5,348,345 |
def get_col_names_for_tick(tick='BCHARTS/BITSTAMPUSD'):
"""
Return the columns available for the tick. Startdate is late by default to avoid getting much data
"""
return quandl.get(tick, start_date=None).columns
| 5,348,346 |
def check_if_needs_inversion(tomodir):
"""check of we need to run CRTomo in a given tomodir
Parameters
----------
tomodir : str
Tomodir to check
Returns
-------
needs_inversion : bool
True if not finished yet
"""
required_files = (
'grid' + os.sep + 'elem.dat',
'grid' + os.sep + 'elec.dat',
'exe' + os.sep + 'crtomo.cfg',
)
needs_inversion = True
for filename in required_files:
if not os.path.isfile(tomodir + os.sep + filename):
needs_inversion = False
# check for crmod OR modeling capabilities
if not os.path.isfile(tomodir + os.sep + 'mod' + os.sep + 'volt.dat'):
if not check_if_needs_modeling(tomodir):
print('no volt.dat and no modeling possible')
needs_inversion = False
# check if finished
inv_ctr_file = tomodir + os.sep + 'inv' + os.sep + 'inv.ctr'
if os.path.isfile(inv_ctr_file):
inv_lines = open(inv_ctr_file, 'r').readlines()
print('inv_lines', inv_lines[-1])
if inv_lines[-1].startswith('***finished***'):
needs_inversion = False
return needs_inversion
| 5,348,347 |
def check_result(request):
"""
    Query a task result by its task id.
:param request:
:param task_id:
:return:
"""
task_id = request.data.get('task_id')
if task_id is None:
        return Response({'message': 'Missing task_id'}, status=status.HTTP_400_BAD_REQUEST)
res = AsyncResult(task_id)
    if res.ready():  # check whether the specified task has finished
        if res.successful():
            return Response(res.result)  # res.result is the return value of the task function, i.e. the task result
        return Response({'message': 'Task failed with error: {}'.format(res.result)})
    return Response({'message': 'Task is still running, check again later...'})
| 5,348,348 |
def get_debug_device():
"""Get the profile to debug from the RIVALCFG_DEVICE environment variable,
if any.
This device should be selected as the one where the commands will be
written, regardless of the selected profile. This is usefull to debug a
mouse that have the same command set than an other one but with a different
product_id.
If the RIVALCFG_PROFILE is defined but the RIVALCFG_DEVICE is not, this
function returns the same output that get_debug_profile()."""
mouse_id = _get_mouse_id_from_env("RIVALCFG_DEVICE")
if mouse_id:
return mouse_id
return _get_mouse_id_from_env("RIVALCFG_PROFILE")
| 5,348,349 |
def make():
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger.info("Nothing implemented yet")
| 5,348,350 |
def export_experiment_configuration_to_yml(logger, log_dir, filename, config_interface_obj, user_confirm):
"""
Dumps the configuration to ``yaml`` file.
:param logger: logger object
:param log_dir: Directory used to host log files (such as the collected statistics).
:type log_dir: str
:param filename: Name of the ``yaml`` file to write to.
:type filename: str
:param config_interface_obj: Configuration interface object.
:param user_confirm: Whether to request user confirmation.
:type user_confirm: bool
"""
# -> At this point, all configuration for experiment is complete.
# Log the resulting training configuration.
conf_str = 'Final parameter registry configuration:\n'
conf_str += '='*80 + '\n'
conf_str += yaml.safe_dump(config_interface_obj.to_dict(), default_flow_style=False)
conf_str += '='*80 + '\n'
logger.info(conf_str)
# Save the resulting configuration into a .yaml settings file, under log_dir
with open(log_dir + filename, 'w') as yaml_backup_file:
yaml.dump(config_interface_obj.to_dict(), yaml_backup_file, default_flow_style=False)
# Ask for confirmation - optional.
if user_confirm:
try:
input('Press <Enter> to confirm and start the experiment\n')
except KeyboardInterrupt:
exit(0)
| 5,348,351 |
def _remove_node(node: int, meta: IntArray, orig_dest: IntArray) -> Tuple[int, int]:
"""
Parameters
----------
node : int
ID of the node to remove
meta : ndarray
Array with rows containing node, count, and address where
address is used to find the first occurrence in orig_desk
orig_dest : ndarray
Array with rows containing origin and destination nodes
Returns
-------
next_node : int
ID of the next node in the branch
next_count : int
Count of the next node in the branch
Notes
-----
Node has 1 link, so:
1. Remove the forward link
2. Remove the backward link
3. Decrement node's count
4. Decrement next_node's count
"""
# 3. Decrement
meta[node, 1] -= 1
    # 1. Remove the forward link
next_offset = meta[node, 2]
orig, next_node = orig_dest[next_offset]
while next_node == -1:
# Increment since this could have been previously deleted
next_offset += 1
next_orig, next_node = orig_dest[next_offset]
assert orig == next_orig
# 4. Remove next_node's link
orig_dest[next_offset, 1] = -1
# 2. Remove the backward link
# Set reverse to -1
reverse_offset = meta[next_node, 2]
reverse_node = orig_dest[reverse_offset, 1]
while reverse_node != orig:
reverse_offset += 1
reverse_node = orig_dest[reverse_offset, 1]
orig_dest[reverse_offset, 1] = -1
# Step forward
meta[next_node, 1] -= 1
next_count = meta[next_node, 1]
return next_node, next_count
| 5,348,352 |
def pg():
"""
Configures environment variables for running commands on pg, e.g.:
fab pg --user=myuser --password=mytopsecretpassword refresh_pg
"""
env.hosts = ['pg-001.ebi.ac.uk']
env.run = run
env.cd = cd
| 5,348,353 |
def tfs(parser, xml_parent, data):
"""yaml: tfs
Specifies the Team Foundation Server repository for this job.
Requires the Jenkins `Team Foundation Server Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Team+Foundation+Server+Plugin>`_
**NOTE**: TFS Password must be entered manually on the project if a
user name is specified. The password will be overwritten with an empty
value every time the job is rebuilt with Jenkins Job Builder.
:arg str server-url: The name or URL of the team foundation server.
If the server has been registered on the machine then it is only
necessary to enter the name.
:arg str project-path: The name of the project as it is registered on the
server.
:arg str login: The user name that is registered on the server. The user
name must contain the name and the domain name. Entered as
domain\\\\user or user\@domain (optional).
**NOTE**: You must enter in at least two slashes for the
domain\\\\user format in JJB YAML. It will be rendered normally.
:arg str use-update: If true, Hudson will not delete the workspace at end
of each build. This causes the artifacts from the previous build to
remain when a new build starts. (default true)
:arg str local-path: The folder where all files will be retrieved into.
The folder name is a relative path, under the workspace of the current
job. (default .)
:arg str workspace: The name of the workspace under which the source
should be retrieved. This workspace is created at the start of a
download, and deleted at the end. You can normally omit the property
unless you want to name a workspace to avoid conflicts on the server
(i.e. when you have multiple projects on one server talking to a
Team Foundation Server). (default Hudson-${JOB_NAME}-${NODE_NAME})
The TFS plugin supports the following macros that are replaced in the
workspace name:
* ${JOB_NAME} - The name of the job.
* ${USER_NAME} - The user name that the Hudson server or slave is
running as.
* ${NODE_NAME} - The name of the node/slave that the plugin currently
is executed on. Note that this is not the hostname, this value is
the Hudson configured name of the slave/node.
* ${ENV} - The environment variable that is set on the master or slave.
:arg dict web-access: Adds links in "changes" views within Jenkins to an
external system for browsing the details of those changes. The "Auto"
selection attempts to infer the repository browser from other jobs,
if supported by the SCM and a job with matching SCM details can be
found. (optional, default Auto).
:web-access value:
* **web-url** -- Enter the URL to the TSWA server. The plugin will
strip the last path (if any) of the URL when building URLs for
change set pages and other pages. (optional, default
uses server-url)
Examples::
scm:
- tfs:
server-url: "tfs.company.com"
project-path: "$/myproject"
login: "mydomain\\\jane"
use-update: false
local-path: "../foo/"
workspace: "Hudson-${JOB_NAME}"
web-access:
- web-url: "http://TFSMachine:8080"
scm:
- tfs:
server-url: "tfs.company.com"
project-path: "$/myproject"
login: "jane@mydomain"
use-update: false
local-path: "../foo/"
workspace: "Hudson-${JOB_NAME}"
web-access:
scm:
- tfs:
server-url: "tfs.company.com"
project-path: "$/myproject"
login: "mydomain\\\jane"
use-update: false
local-path: "../foo/"
workspace: "Hudson-${JOB_NAME}"
"""
tfs = XML.SubElement(xml_parent, 'scm', {'class': 'hudson.plugins.tfs.'
'TeamFoundationServerScm'})
XML.SubElement(tfs, 'serverUrl').text = str(
data.get('server-url', ''))
XML.SubElement(tfs, 'projectPath').text = str(
data.get('project-path', ''))
XML.SubElement(tfs, 'localPath').text = str(
data.get('local-path', '.'))
XML.SubElement(tfs, 'workspaceName').text = str(
data.get('workspace', 'Hudson-${JOB_NAME}-${NODE_NAME}'))
# TODO: In the future, with would be nice to have a place that can pull
# passwords into JJB without having to commit them in plaintext. This
# could also integrate nicely with global configuration options.
XML.SubElement(tfs, 'userPassword')
XML.SubElement(tfs, 'userName').text = str(
data.get('login', ''))
XML.SubElement(tfs, 'useUpdate').text = str(
data.get('use-update', True))
store = data.get('web-access', None)
if 'web-access' in data and isinstance(store, list):
web = XML.SubElement(tfs, 'repositoryBrowser', {'class': 'hudson.'
'plugins.tfs.browsers.'
'TeamSystemWebAccessBrowser'})
XML.SubElement(web, 'url').text = str(store[0].get('web-url', None))
elif 'web-access' in data and store is None:
XML.SubElement(tfs, 'repositoryBrowser', {'class': 'hudson.'
'plugins.tfs.browsers.'
'TeamSystemWebAccess'
'Browser'})
| 5,348,354 |
def forward_many_to_many_without_pr(request):
"""
Return all the stores with associated books, without using prefetch_related.
100ms overall
8ms on queries
11 queries
1 query to fetch all stores:
SELECT "bookstore_store"."id",
"bookstore_store"."name"
FROM "bookstore_store"
10 separate query to fetch books of each store:
SELECT "bookstore_book"."id",
"bookstore_book"."name",
"bookstore_book"."price",
"bookstore_book"."publisher_id"
FROM "bookstore_book"
INNER JOIN "bookstore_bookinstore" ON ("bookstore_book"."id" = "bookstore_bookinstore"."book_id")
WHERE "bookstore_bookinstore"."store_id" = 1
"""
qs = Store.objects.all()
stores = []
for store in qs:
books = [{'id': book.id, 'name': book.name} for book in store.books.all()]
stores.append({'id': store.id, 'name': store.name, 'books': books})
return Response(stores)
| 5,348,355 |
def generate_combinations (n, rlist):
""" from n choose r elements """
combs = [list(itertools.combinations(n, r)) for r in rlist]
combs = [item for sublist in combs for item in sublist]
return combs
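
# Usage sketch (`itertools` must be imported for generate_combinations itself):
import itertools

generate_combinations([1, 2, 3], [1, 2])
# -> [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3)]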
| 5,348,356 |
def test_q_control_true():
"""
Test that when the Q control is enabled the Q limits are respected
"""
options = PowerFlowOptions(SolverType.NR,
control_q=ReactivePowerControlMode.Direct,
retry_with_other_methods=False)
fname = os.path.join('data', 'grids', 'IEEE57.gridcal')
main_circuit = FileOpen(fname).open()
power_flow = PowerFlowDriver(main_circuit, options)
power_flow.run()
nc = compile_snapshot_circuit(main_circuit)
Q = power_flow.results.Sbus.imag
Qmin = nc.Qmin_bus[:, 0] * nc.Sbase
Qmax = nc.Qmax_bus[:, 0] * nc.Sbase
l1 = Q <= Qmax
l2 = Qmin <= Q
ok = l1 * l2
assert ok.all()
| 5,348,357 |
def conv_datetime(dt, version=2):
"""Converts dt to string like
version 1 = 2014:12:15-00:00:00
version 2 = 2014/12/15 00:00:00
version 3 = 2014/12/15 00:00:00
"""
try:
if isinstance(dt, six.string_types):
if _HAS_PANDAS:
dt = pd.to_datetime(dt)
fmt = DATE_FORMATS[int(version)]
return dt.strftime(fmt)
except (ValueError, TypeError):
logger.error(traceback.format_exc())
logger.warning("conv_datetime returns %s" % dt)
return dt
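
# Usage sketch, assuming DATE_FORMATS maps version 2 to "%Y/%m/%d %H:%M:%S" as the
# docstring suggests (pandas is only needed for string inputs):
from datetime import datetime

conv_datetime(datetime(2014, 12, 15), version=2)
# -> "2014/12/15 00:00:00"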
| 5,348,358 |
def get_description_value(name_of_file):
"""
:param name_of_file: Source file for function.
:return: Description value for particular CVE.
"""
line = name_of_file.readline()
while 'value" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
if len(tmp_list) == 2:
value = tmp_list[1][:]
return value
else:
# When description value contains ":" too.
concatenation = ""
for i in range(1, len(tmp_list)-1):
concatenation = concatenation + tmp_list[i] + ":"
concatenation = concatenation + tmp_list[-1]
return concatenation
| 5,348,359 |
def octaves(p, fs, density=False,
frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES,
ref=REFERENCE_PRESSURE):
"""Calculate level per 1/1-octave in frequency domain using the FFT.
    :param p: Instantaneous signal :math:`p(t)`.
:param fs: Sample frequency.
:param density: Power density instead of power.
:param frequencies: Frequencies.
:param ref: Reference value.
:returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
.. note:: Based on power spectrum (FFT)
.. seealso:: :attr:`acoustics.bands.OCTAVE_CENTER_FREQUENCIES`
.. note:: Exact center frequencies are always calculated.
"""
fob = OctaveBand(center=frequencies, fraction=1)
f, p = power_spectrum(p, fs)
fnb = EqualBand(f)
power = integrate_bands(p, fnb, fob)
if density:
power /= (fob.bandwidth/fnb.bandwidth)
level = 10.0*np.log10(power / ref**2.0)
return fob, level
| 5,348,360 |
def action_fs_cluster_resize(
compute_client, network_client, blob_client, config,
storage_cluster_id):
# type: (azure.mgmt.compute.ComputeManagementClient,
# azure.mgmt.network.NetworkManagementClient,
# azure.storage.blob.BlockBlobService, dict, str) -> None
"""Action: Fs Cluster Resize
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
network client
:param azure.storage.blob.BlockBlobService blob_client: blob client
:param dict config: configuration dict
:param str storage_cluster_id: storage cluster id
"""
remotefs.resize_storage_cluster(
compute_client, network_client, blob_client, config,
storage_cluster_id, _REMOTEFSPREP_FILE[0], _REMOTEFSADDBRICK_FILE[0],
_ALL_REMOTEFS_FILES)
| 5,348,361 |
def test_online_command(state, config_file: str, url: str) -> None:
"""Eze run scan remotely on a server"""
api_key = os.environ.get("EZE_APIKEY", "")
api_url = os.environ.get("EZE_REMOTE_SCAN_ENDPOINT", "")
data = {"remote-url": url}
try:
req = urllib.request.Request(
api_url,
data=pretty_print_json(data).encode("utf-8"),
headers={"Authorization": api_key},
)
with urllib.request.urlopen(req) as response: # nosec # nosemgrep # using urllib.request.Request
url_response = response.read()
log(url_response)
except HTTPError as err:
error_text = err.read().decode()
raise click.ClickException(f"""Error in request: {error_text}""")
| 5,348,362 |
def test_get_prophet_holidays():
"""Tests get_prophet_holidays"""
year_list = list(range(2014, 2030+2))
holiday_lookup_countries = ["UnitedStates", "UnitedKingdom", "India", "France", "China"]
# Default holidays from get_prophet_holidays
actual_holidays = ProphetTemplate().get_prophet_holidays(
year_list=year_list)
# Ensures all given countries' holidays are captured in the given `year_list`.
# Loops through all country level holidays and confirm they are available in actual_holidays.
# Suppresses the warnings such as "We only support Diwali and Holi holidays from 2010 to 2025"
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for ctry in holiday_lookup_countries:
expected_ctry_holidays = make_holidays_df(
year_list=year_list,
country=ctry)
# sort df and reset index to ensure assert_frame_equal works well. Without it, assert throws an error.
expected_ctry_holidays = expected_ctry_holidays.sort_values(by=["ds"]).reset_index(drop=True)
actual = actual_holidays[["ds", "holiday"]] # All actual holidays
actual_ctry_holidays = actual.merge( # Ensure country-level holidays are a subset of actual holidays
expected_ctry_holidays,
on=["ds", "holiday"],
how="inner",
validate="1:1") # Ensures 1:1 mapping
actual_ctry_holidays = actual_ctry_holidays.sort_values(by=["ds"]).reset_index(drop=True)
assert_frame_equal(expected_ctry_holidays, actual_ctry_holidays)
# there are no duplicates at date and holiday level in the final holidays df
actual_holidays_rows = actual_holidays.shape[0]
unique_date_holiday = actual_holidays["ds"].astype(str)+" "+actual_holidays["holiday"]
unique_date_holiday_combinations = pd.unique(unique_date_holiday).shape[0]
assert unique_date_holiday_combinations == actual_holidays_rows
# Tests custom params
lower_window = -1
upper_window = 4
countries = ["UnitedKingdom", "Australia"]
actual_holidays = ProphetTemplate().get_prophet_holidays(
countries=countries,
year_list=year_list,
lower_window=lower_window,
upper_window=upper_window)
assert "Australia Day (Observed)" in actual_holidays["holiday"].values
assert "Chinese New Year" not in actual_holidays["holiday"].values
# all of the expected columns are available in the output
actual_columns = list(actual_holidays.columns)
expected_columns = ["ds", "holiday", "lower_window", "upper_window"]
assert actual_columns == expected_columns
# lower_window and upper_window are accurately assigned
assert actual_holidays["lower_window"].unique() == lower_window
assert actual_holidays["upper_window"].unique() == upper_window
# no countries
actual_holidays = ProphetTemplate().get_prophet_holidays(
countries=[],
year_list=year_list,
lower_window=lower_window,
upper_window=upper_window)
assert actual_holidays is None
actual_holidays = ProphetTemplate().get_prophet_holidays(
countries=None,
year_list=year_list,
lower_window=lower_window,
upper_window=upper_window)
assert actual_holidays is None
# single country
with pytest.raises(ValueError, match="`countries` should be a list, found Australia"):
ProphetTemplate().get_prophet_holidays(
countries="Australia",
year_list=year_list,
lower_window=lower_window,
upper_window=upper_window)
| 5,348,363 |
def build_embed(**kwargs):
"""Creates a discord embed object."""
return create_embed(**kwargs)
| 5,348,364 |
def main():
""" Main """
args = parse_arguments()
create_inputs_file(args)
| 5,348,365 |
def VAMA(data, period=8, column='close'):
"""
Volume Adjusted Moving Average
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:param int period: period used for indicator calculation
:param str column: column used for indicator calculation (default = "close")
"""
return TA.VAMA(data, period, column)
| 5,348,366 |
def link_syscall(oldpath, newpath):
"""
http://linux.die.net/man/2/link
"""
# lock to prevent things from changing while we look this up...
filesystemmetadatalock.acquire(True)
# ... but always release it...
try:
trueoldpath = _get_absolute_path(oldpath)
# is the old path there?
if trueoldpath not in fastinodelookuptable:
raise SyscallError("link_syscall","ENOENT","Old path does not exist.")
oldinode = fastinodelookuptable[trueoldpath]
# is oldpath a directory?
if IS_DIR(filesystemmetadata['inodetable'][oldinode]['mode']):
raise SyscallError("link_syscall","EPERM","Old path is a directory.")
# TODO: I should check permissions...
# okay, the old path info seems fine...
truenewpath = _get_absolute_path(newpath)
# does the newpath exist? It shouldn't
if truenewpath in fastinodelookuptable:
raise SyscallError("link_syscall","EEXIST","newpath already exists.")
    # okay, it doesn't exist (great!). Does its parent exist and is it a
    # dir?
truenewparentpath = _get_absolute_parent_path(newpath)
if truenewparentpath not in fastinodelookuptable:
raise SyscallError("link_syscall","ENOENT","New path does not exist.")
newparentinode = fastinodelookuptable[truenewparentpath]
if not IS_DIR(filesystemmetadata['inodetable'][newparentinode]['mode']):
raise SyscallError("link_syscall","ENOTDIR","New path's parent is not a directory.")
# TODO: I should check permissions...
# okay, great!!! We're ready to go! Let's make the file...
newfilename = truenewpath.split('/')[-1]
# first, make the directory entry...
filesystemmetadata['inodetable'][newparentinode]['filename_to_inode_dict'][newfilename] = oldinode
# increment the link count on the dir...
filesystemmetadata['inodetable'][newparentinode]['linkcount'] += 1
# ... and the file itself
filesystemmetadata['inodetable'][oldinode]['linkcount'] += 1
# finally, update the fastinodelookuptable and return success!!!
fastinodelookuptable[truenewpath] = oldinode
return 0
finally:
persist_metadata(METADATAFILENAME)
filesystemmetadatalock.release()
| 5,348,367 |
def doc_vector(text, stop_words, model):
"""
Compute the document vector as the average of its sentence vectors.
:param text: document to vectorise
:param stop_words: stop-word list
:param model: word-vector model (assumed 100-dimensional)
:return: document vector
"""
sen_list = get_sentences(text)
sen_list = [x[1] for x in sen_list]
vector = np.zeros(100, )
length = len(sen_list)
for sentence in sen_list:
sen_vec = sentence_vector(sentence, stop_words, model)
vector += sen_vec
return vector / length
| 5,348,368 |
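# Self-contained sketch of the sentence-averaging pattern used by doc_vector
# above. The stubs below stand in for get_sentences / sentence_vector and the
# 100-d word-vector model, which live elsewhere in the module.
import numpy as np

def _stub_get_sentences(text):
    # crude sentence split on the Chinese full stop, index/sentence pairs like the real helper
    return [(i, s) for i, s in enumerate(text.split("。")) if s.strip()]

def _stub_sentence_vector(sentence, stop_words, model, dim=100):
    rng = np.random.default_rng(abs(hash(sentence)) % (2 ** 32))
    return rng.standard_normal(dim)  # placeholder for the real sentence embedding

def _doc_vector_sketch(text, stop_words=None, model=None, dim=100):
    sentences = [s for _, s in _stub_get_sentences(text)]
    vectors = [_stub_sentence_vector(s, stop_words, model, dim) for s in sentences]
    return np.mean(vectors, axis=0) if vectors else np.zeros(dim)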
def get_ether_pkt(src, dst, ethertype=ether.ETH_TYPE_IP):
"""Creates a Ether packet"""
return ethernet.ethernet(src=src, dst=dst, ethertype=ethertype)
| 5,348,369 |
def _resolve_dependency(param: inspect.Parameter, class_obj: Instantiable, app: App):
"""
Try to get the instance of a parameter from a bound custom resolver for the class which needs it.
If that fails, try to get a registered binding for the parameter's annotation.
If no binding is registered for the annotation, fall back to the parameter's default value.
If the default value is empty, try to instantiate the parameter's annotation class.
"""
class_context = app.get_custom_resolver(class_obj)
custom_resolver = (class_context.get(param.name)
or class_context.get(param.annotation)) if class_context else None
resolved = custom_resolver() if callable(custom_resolver) else custom_resolver
if resolved:
return resolved
default = param.default
binding = _get_binding(param.annotation, app) or default
if binding == inspect._empty:
if param.annotation != inspect._empty and _is_not_primitive(param.annotation):
annotation_params = _get_init_params(param.annotation)
binding = init_class(param.annotation, app, annotation_params)
else:
raise BindingResolutionException(
f'Cannot resolve param {param.name} of class {class_obj}'
)
return binding() if callable(binding) else binding
| 5,348,370 |
def parse_compute_hosts(compute_hosts):
""" Transform a comma-separated list of host names into a list.
:param compute_hosts: A comma-separated list of host names.
:type compute_hosts: str
:return: A list of host names.
:rtype: list(str)
"""
return list(filter(None, re.split(r'[^a-zA-Z0-9\-_]+', compute_hosts)))
| 5,348,371 |
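# Minimal check for parse_compute_hosts above (assuming the list-returning
# version): any run of characters outside [a-zA-Z0-9-_] acts as a separator.
assert parse_compute_hosts("node-1,node-2; node_3") == ["node-1", "node-2", "node_3"]
assert parse_compute_hosts("") == []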
def get_bbox(mask_frame):
"""
get rectangular bounding box for irregular roi
Args:
mask_frame (np.ndarray): the frame containing the mask
Returns:
bbox (np.ndarray): numpy array containing the indexes of the bounding box
"""
bbox = np.zeros(4)
bbox[0] = np.min(np.where(np.max(mask_frame, axis=0))) # x top left
bbox[1] = np.min(np.where(np.max(mask_frame, axis=1))) # y top left
bbox[2] = np.max(np.where(np.max(mask_frame, axis=0))) - bbox[0] # x size
bbox[3] = np.max(np.where(np.max(mask_frame, axis=1))) - bbox[1] # y size
bbox = np.int64(bbox)
return bbox
| 5,348,372 |
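# Tiny sanity check for get_bbox above; note the size entries follow the
# function's own max-minus-min convention.
import numpy as np

_mask = np.zeros((10, 10), dtype=np.uint8)
_mask[3:6, 4:7] = 1  # rows 3-5, cols 4-6
assert tuple(get_bbox(_mask)) == (4, 3, 2, 2)  # (x top left, y top left, x size, y size)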
def view_create_log_entry_verbose(request):
"""Create a new BehavioralLogEntry. Return the JSON version of the entry"""
return view_create_log_entry(request, is_verbose=True)
| 5,348,373 |
def read_dictionary():
"""
This function reads the word file stored at FILE and stores every word
of length >= 4 as a key in the global dict `dictionary`
"""
global dictionary
with open(FILE, 'r') as f:
for line in f:
if len(line.strip()) >= 4:
dictionary[line.strip()] = [line.strip()]
| 5,348,374 |
def webhook_server_factory(free_port):
"""For making a server that can accept Onshape webhooks."""
servers = []
threads = []
def _webhook_server_factory():
""" Create a factory to handle webhook notifications coming in.
:param on_recieved: function callback to handle the json response from the webhook.
:return: HTTPServer: server
"""
class myHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
# Holds an optional callback for handling the request (unused by default).
self.on_received = None
def do_POST(self):
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length)
unquoted_s = body.decode("utf-8")
data = json.loads(unquoted_s)
server._message_q.put(data)
# Always return a 200 response to indicate the notification was received
self.send_response(200)
self.end_headers()
def do_GET(self):
raise NotImplementedError()
class WebhookServer(HTTPServer):
def __init__(
self, server_address, RequestHandlerClass, bind_and_activate=True
):
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
self._tunneled_url = None
# Used to communicate to the main thread that a message has been received and needs to be processed.
# The main thread is responsible for popping messages off as they are processed.
self._message_q = queue.Queue()
@property
def url(self):
return f"http://localhost:{self.server_port}"
@property
def tunneled_url(self):
if not self._tunneled_url:
self._tunneled_url = tunnel(self.server_port)
return self._tunneled_url
def wait_for_message(
self, message_predicate=lambda m: True, seconds_to_wait=5
):
""" Block progress until a certain message is received that satisfies the passed message_predicate
:param message_predicate: blocking function that takes the message and returns True
if it is the 'right one'.
:param seconds_to_wait: seconds to wait for the message. This will throw a StopIteration
if the time runs out without a valid message.
"""
start_time = time.time()
poll_time = seconds_to_wait / 100
while True:
try:
message = self._message_q.get(timeout=poll_time)
if message_predicate(message):
return
self._message_q.task_done()
except queue.Empty:
pass
if time.time() - start_time > seconds_to_wait:
raise TimeoutError()
server = WebhookServer(("localhost", free_port), myHandler)
servers.append(server)
thread = Thread(target=server.serve_forever)
thread.start()
threads.append(thread)
return server
yield _webhook_server_factory
for server, thread in zip(servers, threads):
server.shutdown()
thread.join()
| 5,348,375 |
def view(args):
"""Visualize runs in a local web application.
Features include:
\b
- View and filter runs
- Compare runs
- Browse run files
- View run images and other media
- View run output
Guild View does not currently support starting or modifying
runs. For these operations, use the applicable command line
interface. Run 'guild help' for a complete list of commands.
By default Guild View shows all runs. You can filter runs using
the command options described below.
{{ runs_support.runs_arg }}
{{ runs_support.all_filters }}
"""
from . import view_impl
view_impl.main(args)
| 5,348,376 |
def toggle():
"""
Toggle the state of the switch
"""
is_on = get()
if is_on:
xbmc.log("WeMo Light: Turn OFF")
off()
else:
xbmc.log("WeMo Light: Turn ON")
on()
| 5,348,377 |
def test_set_script_coordinates(expected_output, input_atoms_section, positions):
"""Test that the coordinates are set correctly in the CPMD script."""
# Create temporary directory.
# TODO: USE ACTUAL TEMPORARY DIRECTORY.
tmp_dir_path = get_tmp_dir()
# Create input script.
script_file_path = os.path.join(tmp_dir_path, 'script.in')
with open(script_file_path, 'w') as f:
f.write(SCRIPT_NO_ATOMS.format(atoms=input_atoms_section))
if not isinstance(expected_output, str):
with pytest.raises(expected_output):
set_script_coordinates(script_file_path, positions)
else:
set_script_coordinates(script_file_path, positions, precision=10)
# Read the atoms section.
with open(script_file_path, 'r') as f:
script = f.read()
assert script == SCRIPT_NO_ATOMS.format(atoms=expected_output)
| 5,348,378 |
def generator_validator(file_path: Text):
"""
Validates that the generator module exists and has all the required
methods
"""
if not exists(file_path):
raise ArgumentTypeError(f"File {file_path} could not be found")
try:
module = import_file("generator", file_path)
except SyntaxError:
doing.logger.exception("Syntax error in generator")
raise ArgumentTypeError(f"File {file_path} has a syntax error")
except ImportError:
raise ArgumentTypeError(f"File {file_path} cannot be imported")
except Exception:
doing.logger.exception("Unknown error while importing generator")
raise ArgumentTypeError("Unknown error while importing generator")
if not hasattr(module, "get_source") or not callable(module.get_source):
raise ArgumentTypeError(
f"Generator does not expose a get_source(environment_name) method"
)
if not hasattr(module, "allow_transfer") or not callable(module.allow_transfer):
raise ArgumentTypeError(
f"Generator does not expose a allow_transfer(origin, target) method"
)
if not hasattr(module, "get_backup_dir") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_backup_dir(environment) method"
)
if not hasattr(module, "get_patch") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_patch(origin, target) method"
)
if not hasattr(module, "get_wp_config") or not callable(module.get_backup_dir):
raise ArgumentTypeError(
f"Generator does not expose a get_wp_config(environment) method"
)
return module
| 5,348,379 |
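# Sketch of the interface generator_validator above expects a generator module
# to expose; the names come from its checks, the bodies are placeholders.
def get_source(environment_name):
    raise NotImplementedError

def allow_transfer(origin, target):
    raise NotImplementedError

def get_backup_dir(environment):
    raise NotImplementedError

def get_patch(origin, target):
    raise NotImplementedError

def get_wp_config(environment):
    raise NotImplementedError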
def main(**kwargs):
"""
main function
"""
# logger = logging.getLogger(sys._getframe().f_code.co_name)
source = Path(kwargs['source'])
destination = Path(kwargs['destination'])
with open(source, 'r', encoding='utf-8') as f:
j = json.load(f)
del f
items = []
for orig_id, obj_data in j.items():
item = {}
for field_name, field_value in obj_data.items():
if field_name not in ['id', 'slug', 'summary']:
if field_value is not None:
item[field_name] = field_value
item['id'] = obj_data['slug']
item['description'] = obj_data['summary']
items.append(item)
payload = {
'items': []
}
for item in items:
payload['items'].append(
{
kwargs['url_path']: item
}
)
with open(destination, 'w', encoding='utf-8') as f:
json.dump(payload, f, ensure_ascii=False, indent=4, sort_keys=True)
del f
| 5,348,380 |
def from_bytes(
data: bytes, idx_list: Optional[List[int]] = None, compression: Optional[str] = "gz"
) -> List[ProgramGraph]:
"""Deserialize Program Graphs from a byte array.
:param data: The serialized Program Graphs.
:param idx_list: A zero-based list of graph indices to return. If not
provided, all graphs are returned.
:param compression: Either :code:`gz` for GZip compression (the default), or
:code:`None` for no compression. Compression increases the cost of
serializing and deserializing but can greatly reduce the size of the
serialized graphs.
:return: A list of Program Graphs.
:raise GraphCreationError: If deserialization fails.
"""
decompressors = {
"gz": gzip.decompress,
None: lambda d: d,
}
if compression not in decompressors:
supported = ", ".join(sorted(str(x) for x in decompressors))
raise TypeError(
f"Invalid compression argument: {compression}. "
f"Supported compressions: {supported}"
)
decompress = decompressors[compression]
graph_list = ProgramGraphList()
try:
graph_list.ParseFromString(decompress(data))
except (gzip.BadGzipFile, google.protobuf.message.DecodeError) as e:
raise GraphCreationError(str(e)) from e
if idx_list:
return [graph_list.graph[i] for i in idx_list]
return list(graph_list.graph)
| 5,348,381 |
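# Hedged round-trip check for from_bytes above, using an empty ProgramGraphList
# (assumed to come from the module's protobuf definitions) to exercise both the
# gzip and uncompressed branches.
import gzip

_empty = ProgramGraphList().SerializeToString()
assert from_bytes(gzip.compress(_empty)) == []
assert from_bytes(_empty, compression=None) == []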
def runTimer(t):
"""t is timer time in milliseconds"""
blinkrow = 500 #in milliseconds
eachrow = t // 5
for i in range(5):
for _ in range(eachrow//(2*blinkrow)):
display.show(Image(hourglassImages[i+1]))
sleep(blinkrow)
display.show(Image(hourglassImages[i]))
sleep(blinkrow)
if button_b.was_pressed():
return
display.show(Image.HAPPY)
return
| 5,348,382 |
def ascMoveFormula(note_distance, finger_distance, n1, n2, f1, f2):
"""This is for situations where direction of notes and fingers are opposite,
because either way, you want to add the distance between the fingers.
"""
# The math.ceil part is so it really hits a value in our moveHash.
# This could be fixed if I put more resolution into the moveHash
total_distance = math.ceil(note_distance + finger_distance)
# This adds a small amount for every additional halfstep over 24. Fairly
# representative of what it should be.
if total_distance > 24:
return MOVE_HASH[24] + (total_distance - 24) / 5
else:
cost = MOVE_HASH[total_distance]
cost += colorRules(n1, n2, f1, f2, finger_distance)
return cost
| 5,348,383 |
def create_stomp_connection(garden: Garden) -> Connection:
"""Create a stomp connection wrapper for a garden
Constructs a stomp connection wrapper from the garden's stomp connection parameters.
Will ignore subscribe_destination as the router shouldn't be subscribing to
anything.
Args:
garden: The garden whose stomp connection parameters should be used
Returns:
The created connection wrapper
"""
connection_params = garden.connection_params.get("stomp", {})
connection_params = deepcopy(connection_params)
connection_params["subscribe_destination"] = None
return Connection(**connection_params)
| 5,348,384 |
def W_n(S, n_vals, L_vals, J_vals):
""" Field-free energy. Includes extra correction terms.
-- atomic units --
"""
neff = n_vals - get_qd(S, n_vals, L_vals, J_vals)
energy = np.array([])
for i, n in enumerate(n_vals):
en = -0.5 * (neff[i]**-2.0 - 3.0 * alpha**2.0 / (4.0 * n**4.0) + \
mu_M**2.0 * ((1.0 + (5.0 / 6.0) * (alpha * Z)**2.0)/ n**2.0))
energy = np.append(energy, en)
return energy
| 5,348,385 |
def page_output(output_string):
"""Pipe string to a pager.
If the PAGER environment variable is set, use it as the pager;
otherwise use `less`.
Args:
output_string (str): String to put output.
"""
output_string = unidecode(output_string.decode('utf-8'))
if os.environ.get('__TAUCMDR_DISABLE_PAGER__', False):
print(output_string)
else:
pager_cmd = os.environ.get('PAGER', 'less -F -R -S -X -K').split(' ')
proc = subprocess.Popen(pager_cmd, stdin=subprocess.PIPE)
proc.communicate(output_string)
| 5,348,386 |
def obter_movimento_manual(tab, peca): # tabuleiro x peca -> tuplo de posicoes
"""
Receives a piece and a board together with a move/position entered
manually, depending on the current phase of the game.
In the placement phase it reads a string with a single position.
In the movement phase it reads a string with two positions.
:param tab: board
:param peca: piece
:return: tuple of positions
"""
linhas = obter_str_linhas()
colunas = obter_str_colunas()
if len(obter_posicoes_jogador(tab, peca)) < 3:  # placement phase
pos = str(input('Turno do jogador. Escolha uma posicao: '))
if len(pos) == 2 and pos[0] in colunas and pos[1] in linhas:
pos = cria_posicao(pos[0], pos[1])
if eh_posicao_livre(tab, pos):
return pos,
if len(obter_posicoes_jogador(tab, peca)) == 3:  # movement phase
pos = str(input('Turno do jogador. Escolha um movimento: '))
if len(pos) == 4 and pos[0] in colunas and pos[1] in linhas \
and pos[2] in colunas and pos[3] in linhas:
pos1 = cria_posicao(pos[0], pos[1])
pos2 = cria_posicao(pos[2], pos[3])
if obter_peca(tab, pos1) == peca:
if eh_posicao_livre(tab, pos2):
if pos2 in obter_posicoes_adjacentes(pos1):
return pos1, pos2
if posicoes_iguais(pos1, pos2) and \
len(obter_pos_adj_livres(tab, pos1)) == 0:
return pos1, pos2
raise ValueError('obter_movimento_manual: escolha invalida')
| 5,348,387 |
def parse_header(req_header, taint_value):
"""
Find which request header carries the taint value and return its name.
"""
import base64
header_raw = base64.b64decode(req_header).decode('utf-8').split('\n')
for header in header_raw:
_header_list = header.split(':')
_header_name = _header_list[0]
_header_value = ':'.join(_header_list[1:])
if equals(taint_value, _header_value):
return _header_name
| 5,348,388 |
def filter_and_sort_files(
fnames: Union[str, List[str]], return_matches: bool = False
):
"""Find all timestamped data files and sort them by their timestamps"""
if isinstance(fnames, (Path, str)):
fnames = os.listdir(fnames)
# use the timestamps from all valid timestamped
# filenames to sort the files as the first index
# in a tuple
matches = zip(map(fname_re.search, map(str, fnames)), fnames)
tups = [(m.group("t0"), f, m) for m, f in matches if m is not None]
# if return_matches is True, return the match object,
# otherwise just return the raw filename
return_idx = 2 if return_matches else 1
return [t[return_idx] for t in sorted(tups)]
| 5,348,389 |
def fake_batch(obs_space, action_space, batch_size=1):
"""Create a fake SampleBatch compatible with Policy.learn_on_batch."""
samples = {
SampleBatch.CUR_OBS: fake_space_samples(obs_space, batch_size),
SampleBatch.ACTIONS: fake_space_samples(action_space, batch_size),
SampleBatch.REWARDS: np.random.randn(batch_size).astype(np.float32),
SampleBatch.NEXT_OBS: fake_space_samples(obs_space, batch_size),
SampleBatch.DONES: np.random.randn(batch_size) > 0,
}
return SampleBatch(samples)
| 5,348,390 |
def multi_variant_endpoint(sagemaker_session):
"""
Sets up the multi variant endpoint before the integration tests run.
Cleans up the multi variant endpoint after the integration tests run.
"""
multi_variant_endpoint.endpoint_name = unique_name_from_base(
"integ-test-multi-variant-endpoint"
)
with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
endpoint_name=multi_variant_endpoint.endpoint_name,
sagemaker_session=sagemaker_session,
hours=2,
):
# Creating a model
bucket = sagemaker_session.default_bucket()
prefix = "sagemaker/DEMO-VariantTargeting"
model_url = S3Uploader.upload(
local_path=XG_BOOST_MODEL_LOCAL_PATH,
desired_s3_uri="s3://{}/{}".format(bucket, prefix),
sagemaker_session=sagemaker_session,
)
image_uri = image_uris.retrieve(
"xgboost",
sagemaker_session.boto_region_name,
version="0.90-1",
instance_type=DEFAULT_INSTANCE_TYPE,
image_scope="inference",
)
multi_variant_endpoint_model = sagemaker_session.create_model(
name=MODEL_NAME,
role=ROLE,
container_defs={"Image": image_uri, "ModelDataUrl": model_url},
)
# Creating a multi variant endpoint
variant1 = production_variant(
model_name=MODEL_NAME,
instance_type=DEFAULT_INSTANCE_TYPE,
initial_instance_count=DEFAULT_INSTANCE_COUNT,
variant_name=TEST_VARIANT_1,
initial_weight=TEST_VARIANT_1_WEIGHT,
)
variant2 = production_variant(
model_name=MODEL_NAME,
instance_type=DEFAULT_INSTANCE_TYPE,
initial_instance_count=DEFAULT_INSTANCE_COUNT,
variant_name=TEST_VARIANT_2,
initial_weight=TEST_VARIANT_2_WEIGHT,
)
sagemaker_session.endpoint_from_production_variants(
name=multi_variant_endpoint.endpoint_name, production_variants=[variant1, variant2]
)
# Yield to run the integration tests
yield multi_variant_endpoint
# Cleanup resources
sagemaker_session.delete_model(multi_variant_endpoint_model)
sagemaker_session.sagemaker_client.delete_endpoint_config(
EndpointConfigName=multi_variant_endpoint.endpoint_name
)
# Validate resource cleanup
with pytest.raises(Exception) as exception:
sagemaker_session.sagemaker_client.describe_model(
ModelName=multi_variant_endpoint_model.name
)
assert "Could not find model" in str(exception.value)
with pytest.raises(Exception) as exception:
sagemaker_session.sagemaker_client.describe_endpoint_config(
EndpointConfigName=multi_variant_endpoint.endpoint_name
)
assert "Could not find endpoint" in str(exception.value)
| 5,348,391 |
def compute_macro_f1(answer_stats, prefix=''):
"""Computes F1, precision, recall for a list of answer scores.
This computes the *language-wise macro F1*. For minimal answers,
we also compute a partial match score that uses F1, which would be
included in this computation via `answer_stats`.
Args:
answer_stats: List of per-example scores.
prefix (''): Prefix to prepend to score dictionary.
Returns:
Dictionary mapping measurement names to scores.
"""
has_gold, has_pred, f1, _ = list(zip(*answer_stats))
macro_precision = eval_utils.safe_divide(sum(f1), sum(has_pred))
macro_recall = eval_utils.safe_divide(sum(f1), sum(has_gold))
macro_f1 = eval_utils.safe_divide(
2 * macro_precision * macro_recall,
macro_precision + macro_recall)
return collections.OrderedDict({
prefix + 'n': len(answer_stats),
prefix + 'f1': macro_f1,
prefix + 'precision': macro_precision,
prefix + 'recall': macro_recall
})
| 5,348,392 |
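# Worked example for compute_macro_f1 above. Each answer_stats entry is
# (has_gold, has_pred, f1, _); the last slot is ignored by this function.
_stats = [
    (1, 1, 1.0, None),  # exact match
    (1, 1, 0.5, None),  # partial match
    (1, 0, 0.0, None),  # missed gold answer
    (0, 1, 0.0, None),  # spurious prediction
]
# precision = 1.5 / 3 predictions = 0.5, recall = 1.5 / 3 golds = 0.5, so F1 = 0.5
_scores = compute_macro_f1(_stats, prefix='demo_')
assert abs(_scores['demo_f1'] - 0.5) < 1e-9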
def piecewise_linear(x, rng, NUM_PEOPLE):
"""
This function samples the piecewise linear viral_load model.
Args:
x (np.array): time samples at which to evaluate the viral load
rng (np.random.RandomState): random number generator
NUM_PEOPLE (int): number of people to sample viral-load curves for
Returns:
np.array: array of shape (NUM_PEOPLE, len(x)), one viral-load curve per person
"""
viral_loads = []
for person in range(NUM_PEOPLE):
plateau_height, plateau_start, plateau_end, recovered = _sample_viral_load_piecewise(rng)
viral_load = []
for time_sample in x:
if time_sample < plateau_start:
cur_viral_load = plateau_height * time_sample / plateau_start
elif time_sample < plateau_end:
cur_viral_load = plateau_height
else:
cur_viral_load = plateau_height - plateau_height * (time_sample - plateau_end) / (recovered - plateau_end)
if cur_viral_load < 0:
cur_viral_load = np.array([0.])
viral_load.append(cur_viral_load)
viral_loads.append(np.array(viral_load, dtype=float).flatten())
viral_loads = np.array(viral_loads)
return viral_loads
| 5,348,393 |
def mol_sim_matrix(fingerprints1,
fingerprints2,
method='cosine',
filename=None,
max_size=1000,
print_progress=True):
"""Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
"""
if filename is not None:
try:
molecular_similarities = np.load(filename)
print("Molecular similarity scores found and loaded.")
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename)
print("Molecular scores will be calculated from scratch.")
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
# Create arrays of all fingerprints
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
# Calculate all-vs-all similarity matrix (similarity = 1 - distance)
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
# Split large matrices up into smaller ones to track progress
splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))
count_splits = 0
for i in range(int(np.ceil(matrix_size[0]/max_size))):
low1 = i * max_size
high1 = min((i + 1) * max_size, matrix_size[0])
for j in range(int(np.ceil(matrix_size[1]/max_size))):
low2 = j * max_size
high2 = min((j + 1) * max_size, matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
fingerprints_arr1[low1:high1],
fingerprints_arr2[low2:high2],
method
)
# Track progress:
count_splits += 1
if print_progress:
print('\r',
"Calculated submatrix {} out of {}".format(count_splits, splits),
end="")
if print_progress:
print(20 * '--')
print("Succesfully calculated matrix with all-vs-all molecular similarity values.")
if filename is not None:
np.save(filename, molecular_similarities)
print("Matrix was saved under:", filename)
return molecular_similarities
| 5,348,394 |
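# Quick self-contained check of mol_sim_matrix above with random binary
# fingerprints; cosine similarity is 1 - cosine distance, so comparing a set
# against itself gives 1.0 on the diagonal.
import numpy as np

_rng = np.random.default_rng(42)
_fps = [_rng.integers(0, 2, size=64) for _ in range(5)]
_sim = mol_sim_matrix(_fps, _fps, method='cosine', print_progress=False)
assert _sim.shape == (5, 5)
assert np.allclose(np.diag(_sim), 1.0)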
def test_load(elf, expected):
"""Test ELF files that load values from memory into a register.
"""
elf_filename = os.path.join(elf_dir, elf)
revelation = Revelation()
with open(elf_filename, 'rb') as elf:
revelation.init_state(elf, elf_filename, False, is_test=True)
revelation.states[0x808].mem.write(0x00100004, 4, 0xffffffff)
revelation.run()
expected.check(revelation.states[0x808])
| 5,348,395 |
def energies_over_delta(syst, p, k_x):
"""Same as energy_operator(), but returns the
square-root of the eigenvalues"""
operator = energy_operator(syst, p, k_x)
return np.sqrt(np.linalg.eigvalsh(operator))
| 5,348,396 |
def subscribe_user_to_basket(instance_id, newsletters=[]):
"""Subscribe a user to Basket.
This task subscribes a user to Basket, if not already subscribed
and then updates his data on the Phonebook DataExtension. The task
retries on failure at most BASKET_TASK_MAX_RETRIES times and if it
finally doesn't complete successfully, it emails the
settings.BASKET_MANAGERS with details.
"""
from mozillians.users.models import UserProfile
try:
instance = UserProfile.objects.get(pk=instance_id)
except UserProfile.DoesNotExist:
instance = None
if (not BASKET_ENABLED or not instance or not newsletters
or not waffle.switch_is_active('BASKET_SWITCH_ENABLED')):
return
lookup_subtask = lookup_user_task.subtask((instance.email,))
subscribe_subtask = subscribe_user_task.subtask((instance.email, newsletters,))
chain(lookup_subtask | subscribe_subtask)()
| 5,348,397 |
def browserSelectionRecall():
"""try to re-highlight the previously selected item."""
# 14.01.13-15.39: implemented selection recall to prevent losing the selected scan while changing file display options
for i in range(uist.listBrowser.count()):
if uist.listBrowser.item(i).text()==browserSelected:
debug("re-selecting: "+browserSelected)
#uist.listBrowser.setItemSelected(uist.listBrowser.item(i),True)
uist.listBrowser.setCurrentItem(uist.listBrowser.item(i))
| 5,348,398 |
def count_matching(d1: Die, d2: Die, num_rolls: int) -> int:
""" Roll the given dice a number of times and count when they match.
Args:
d1 (Die): One Die object (must not be None)
d2 (Die): Another Die object (must not be None)
num_rolls (int): Positive number of rolls to toss.
Returns:
int number of times both dice showed the same number.
"""
matching = 0
for _ in range(num_rolls):
matching += int(d1.roll() == d2.roll())
return matching
| 5,348,399 |
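# Minimal sketch exercising count_matching above with a tiny stand-in die;
# the real Die class is assumed to come from the surrounding module.
import random

class _FakeDie:
    def __init__(self, seed=None, sides=6):
        self._rng = random.Random(seed)
        self._sides = sides

    def roll(self):
        return self._rng.randint(1, self._sides)

_matches = count_matching(_FakeDie(seed=1), _FakeDie(seed=2), num_rolls=600)
assert 0 <= _matches <= 600  # on average roughly 1/6 of rolls match for a d6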