content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def test_level_print_large_tree(large_tree):
"""Confirm the result of level_order_print of large_tree."""
result = print_level_order(large_tree)
assert result == '12 \n 9 2 11 \n 1 3 99 \n 13 14 \n '
| 5,351,100 |
def pp_date(dt):
"""
Human-readable (i.e. pretty-print) dates, e.g. for spreadsheets:
See http://docs.python.org/tutorial/stdlib.html
e.g. 31-Oct-2011
"""
d = date_to_datetime(dt)
return d.strftime('%d-%b-%Y')
| 5,351,101 |
def write_template_vars(template_path, cb):
"""Passes file contents through a callable and writes the result to the file.
Modifies the template_path inplace with results from passing its content
through the callable: cb.
Args:
template_path: The path to the file to be read, processed and then written.
cb: A callable which takes a string (with the template file content) and
returns the modified string that should be written to the file.
"""
with open(template_path, "r") as in_file:
template_content = in_file.read()
with open(template_path, "w") as out_file:
out_file.write(cb(template_content))
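A minimal usage sketch (the file name and placeholder token below are hypothetical): the callable receives the whole file content and its return value overwrites the file in place.
# Hypothetical example: substitute a version placeholder in a template file.
write_template_vars(
    "deploy.yaml.tmpl",                                     # assumed to exist
    lambda content: content.replace("{{IMAGE_TAG}}", "v1.2.3"),
)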
| 5,351,102 |
def optimizer_p(cd, path, i, obs, path_penalty):
"""Optimizer of the current path. Reduce the piece-wise path length in the free space of the environment."""
p_tmp = copy.deepcopy(path)
p_tmp[i].x = p_tmp[i].x + cd[0]
p_tmp[i].y = p_tmp[i].y + cd[1]
r1 = math.sqrt((p_tmp[i-1].x - p_tmp[i].x)**2+(p_tmp[i-1].y - p_tmp[i].y)**2)
r2 = math.sqrt((p_tmp[i+1].x - p_tmp[i].x)**2+(p_tmp[i+1].y - p_tmp[i].y)**2)
penalty1 = 0
penalty2 = 0
    if obs:
        for o in obs:
            d1 = check_obst(p_tmp[i-1].x, p_tmp[i-1].y, p_tmp[i].x, p_tmp[i].y, o[0].x, o[0].y)
            if d1 < o[1]:
                penalty1 = max(penalty1, (o[1] - d1)*path_penalty)
            d2 = check_obst(p_tmp[i].x, p_tmp[i].y, p_tmp[i+1].x, p_tmp[i+1].y, o[0].x, o[0].y)
            if d2 < o[1]:
                penalty2 = max(penalty2, (o[1] - d2)*path_penalty)
return r1 + r2 + abs(r1-r2) + penalty1 + penalty2
| 5,351,103 |
def create_message(sender_address, receiver_address, subject, email_content):
    """Create a message for an email.
    Args:
        sender_address: Email address of the sender.
        receiver_address: Email address of the receiver.
        subject: The subject of the email message.
        email_content: The HTML body of the email message.
    Returns:
        A dict containing the base64url-encoded email under the 'raw' key.
"""
message = MIMEText(email_content, 'html')
message['to'] = receiver_address
message['from'] = sender_address
message['subject'] = subject
# return {'raw': base64.urlsafe_b64encode(message.as_string())}
b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
b64_string = b64_bytes.decode()
return {'raw': b64_string}
| 5,351,104 |
def retry_on_error(num_tries=5, retriable_exceptions=(socket.error, socket.gaierror,
httplib.HTTPException)):
"""
Retries on a set of allowable exceptions, mimicking boto's behavior by default.
:param num_tries: number of times to try before giving up.
:return: a generator yielding contextmanagers
Retry the correct number of times and then give up and reraise
>>> i = 0
>>> for attempt in retry_on_error(retriable_exceptions=(RuntimeError,)):
... with attempt:
... i += 1
... raise RuntimeError("foo")
Traceback (most recent call last):
...
RuntimeError: foo
>>> i
5
Give up and reraise on any unexpected exceptions
>>> i = 0
>>> for attempt in retry_on_error(num_tries=5, retriable_exceptions=()):
... with attempt:
... i += 1
... raise RuntimeError("foo")
Traceback (most recent call last):
...
RuntimeError: foo
>>> i
1
Do things only once if they succeed!
>>> i = 0
>>> for attempt in retry_on_error():
... with attempt:
... i += 1
>>> i
1
"""
go = [None]
@contextmanager
def attempt(last=False):
try:
yield
except retriable_exceptions as e:
if last:
raise
else:
log.info("Got a retriable exception %s, trying again" % e.__class__.__name__)
else:
go.pop()
while go:
if num_tries == 1:
yield attempt(last=True)
else:
yield attempt()
# It's safe to do this, even with Python's weird default
# arguments behavior, since we are assigning to num_tries
# rather than mutating it.
num_tries -= 1
| 5,351,105 |
def _write_query_function(fid: TextIO) -> None:
"""
Write the function needed to translate query parameters into a string.
:param fid: target
:return:
"""
fid.write('{-| Translates a list of (name, parameter) and a list of (name, optional parameter) to a\n')
fid.write('well-formatted query string.\n')
fid.write('-}\n')
fid.write('paramsToQuery : List ( String, String ) -> List ( String, Maybe String ) -> String\n')
fid.write('paramsToQuery params maybeParams =\n')
fid.write(INDENT + 'let\n')
fid.write(INDENT * 2 + 'queryParams : List String\n')
fid.write(INDENT * 2 + 'queryParams =\n')
fid.write(INDENT * 3 + 'List.map (\\( name, value ) -> name ++ "=" ++ Http.encodeUri value) params\n\n')
fid.write(INDENT * 2 + 'filteredParams : List String\n')
fid.write(INDENT * 2 + 'filteredParams =\n')
fid.write(INDENT * 3 + 'List.filter (\\( _, maybeValue ) -> maybeValue /= Nothing) maybeParams\n')
fid.write(INDENT * 4 + '|> List.map (\\( name, maybeValue ) -> ( name, Maybe.withDefault "" maybeValue ))\n')
fid.write(INDENT * 4 + '|> List.map (\\( name, value ) -> name ++ "=" ++ Http.encodeUri value)\n')
fid.write(INDENT + 'in\n')
fid.write(INDENT + 'List.concat [queryParams, filteredParams]\n')
fid.write(INDENT * 2 + '|> String.join "&"\n')
fid.write(INDENT * 2 + '|> \\str ->\n')
fid.write(INDENT * 3 + 'if String.isEmpty str then\n')
fid.write(INDENT * 4 + '""\n')
fid.write(INDENT * 3 + 'else\n')
fid.write(INDENT * 4 + '"?" ++ str\n')
| 5,351,106 |
def test_application_audit_creation():
"""Test Application History Model creation"""
instance1 = ApplicationHistory(id=1, application_id=10, application_status="New",
form_url="https://testsample.com/api/form/6100fae7ba5ac0627e9eefe6/submission/6101131fc325d44c1d846c13")
assert instance1.id == 1
assert instance1.form_url == "https://testsample.com/api/form/6100fae7ba5ac0627e9eefe6/submission/6101131fc325d44c1d846c13"
| 5,351,107 |
def handle_view_errors(func):
"""
view error handler wrapper
# TODO - raise user related errors here
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
debug: bool = current_app.config.get('DEBUG')
try:
return func(*args, **kwargs)
except ValueError as e:
message: str = str(e)
if debug:
print(message)
raise InputError(status=error_codes.input_error_code, description=message)
except TypeError as e:
message: str = str(e)
if debug:
print(e)
raise InputError(
status=error_codes.input_error_code, description=message)
except BadRequestError as e:
if debug:
print(e)
message: str = '''Bad Request: while connecting to database'''
raise RequestError(status=error_codes.bad_request_error_code, description=message)
except BadQueryError as e:
if debug:
print(e)
message: str = '''Database Query Error: Error while querying database please inform admin'''
raise DataServiceError(
status=error_codes.data_service_error_code, description=message)
except ConnectionRefusedError as e:
if debug:
print(e)
message: str = '''Connection Refused: Unable to connect to database please try again later'''
raise RequestError(status=error_codes.remote_data_error, description=message)
except RetryError as e:
if debug:
print(e)
message: str = '''Retries Exceeded: Unable to connect to database please try again later
or inform the administrator'''
raise RequestError(status=error_codes.remote_data_error, description=message)
except Aborted as e:
if debug:
print(e)
message: str = '''Abort Error: connection refused by remote server'''
raise RequestError(status=error_codes.remote_data_error, description=message)
return wrapper
| 5,351,108 |
def main(notebook_mode=False,config=None):
"""
USER CONTROLS
"""
# terminal mode
if notebook_mode==False:
parser = argparse.ArgumentParser()
parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
parser.add_argument('--gpu_id', help="Please give a value for gpu id")
parser.add_argument('--model', help="Please give a value for model name")
parser.add_argument('--dataset', help="Please give a value for dataset name")
parser.add_argument('--out_dir', help="Please give a value for out_dir")
parser.add_argument('--seed', help="Please give a value for seed")
parser.add_argument('--epochs', help="Please give a value for epochs")
parser.add_argument('--batch_size', help="Please give a value for batch_size")
parser.add_argument('--init_lr', help="Please give a value for init_lr")
parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
parser.add_argument('--min_lr', help="Please give a value for min_lr")
parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
parser.add_argument('--L', help="Please give a value for L")
parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
parser.add_argument('--out_dim', help="Please give a value for out_dim")
parser.add_argument('--residual', help="Please give a value for residual")
parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
parser.add_argument('--readout', help="Please give a value for readout")
parser.add_argument('--kernel', help="Please give a value for kernel")
parser.add_argument('--n_heads', help="Please give a value for n_heads")
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
parser.add_argument('--num_pool', help="Please give a value for num_pool")
parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
parser.add_argument('--linkpred', help="Please give a value for linkpred")
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
# device
if args.gpu_id is not None:
config['gpu']['id'] = int(args.gpu_id)
config['gpu']['use'] = True
device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
# model, dataset, out_dir
if args.model is not None:
MODEL_NAME = args.model
else:
MODEL_NAME = config['model']
if args.dataset is not None:
DATASET_NAME = args.dataset
else:
DATASET_NAME = config['dataset']
dataset = LoadData(DATASET_NAME)
if args.out_dir is not None:
out_dir = args.out_dir
else:
out_dir = config['out_dir']
# parameters
params = config['params']
if args.seed is not None:
params['seed'] = int(args.seed)
if args.epochs is not None:
params['epochs'] = int(args.epochs)
if args.batch_size is not None:
params['batch_size'] = int(args.batch_size)
if args.init_lr is not None:
params['init_lr'] = float(args.init_lr)
if args.lr_reduce_factor is not None:
params['lr_reduce_factor'] = float(args.lr_reduce_factor)
if args.lr_schedule_patience is not None:
params['lr_schedule_patience'] = int(args.lr_schedule_patience)
if args.min_lr is not None:
params['min_lr'] = float(args.min_lr)
if args.weight_decay is not None:
params['weight_decay'] = float(args.weight_decay)
if args.print_epoch_interval is not None:
params['print_epoch_interval'] = int(args.print_epoch_interval)
if args.max_time is not None:
params['max_time'] = float(args.max_time)
# network parameters
net_params = config['net_params']
net_params['device'] = device
net_params['gpu_id'] = config['gpu']['id']
net_params['batch_size'] = params['batch_size']
if args.L is not None:
net_params['L'] = int(args.L)
if args.hidden_dim is not None:
net_params['hidden_dim'] = int(args.hidden_dim)
if args.out_dim is not None:
net_params['out_dim'] = int(args.out_dim)
if args.residual is not None:
net_params['residual'] = True if args.residual=='True' else False
if args.edge_feat is not None:
net_params['edge_feat'] = True if args.edge_feat=='True' else False
if args.readout is not None:
net_params['readout'] = args.readout
if args.kernel is not None:
net_params['kernel'] = int(args.kernel)
if args.n_heads is not None:
net_params['n_heads'] = int(args.n_heads)
if args.gated is not None:
net_params['gated'] = True if args.gated=='True' else False
if args.in_feat_dropout is not None:
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
if args.layer_norm is not None:
net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
net_params['sage_aggregator'] = args.sage_aggregator
if args.data_mode is not None:
net_params['data_mode'] = args.data_mode
if args.num_pool is not None:
net_params['num_pool'] = int(args.num_pool)
if args.gnn_per_block is not None:
net_params['gnn_per_block'] = int(args.gnn_per_block)
if args.embedding_dim is not None:
net_params['embedding_dim'] = int(args.embedding_dim)
if args.pool_ratio is not None:
net_params['pool_ratio'] = float(args.pool_ratio)
if args.linkpred is not None:
net_params['linkpred'] = True if args.linkpred=='True' else False
if args.cat is not None:
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
if args.pos_enc is not None:
net_params['pos_enc'] = True if args.pos_enc=='True' else False
if args.pos_enc_dim is not None:
net_params['pos_enc_dim'] = int(args.pos_enc_dim)
# notebook mode
if notebook_mode:
# parameters
params = config['params']
# dataset
DATASET_NAME = config['dataset']
dataset = LoadData(DATASET_NAME)
# device
device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
out_dir = config['out_dir']
# GNN model
MODEL_NAME = config['model']
# network parameters
net_params = config['net_params']
net_params['device'] = device
net_params['gpu_id'] = config['gpu']['id']
net_params['batch_size'] = params['batch_size']
# ZINC
net_params['num_atom_type'] = dataset.num_atom_type
net_params['num_bond_type'] = dataset.num_bond_type
#added
net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)
net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
if MODEL_NAME == 'DiffPool':
# calculate assignment dimension: pool_ratio * largest graph's maximum
# number of nodes in the dataset
num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
max_num_node = max(num_nodes)
net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
if MODEL_NAME == 'RingGNN':
num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
if not os.path.exists(out_dir + 'results'):
os.makedirs(out_dir + 'results')
if not os.path.exists(out_dir + 'configs'):
os.makedirs(out_dir + 'configs')
net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
| 5,351,109 |
def test_string_dunder():
"""Test that the vetter's string method behaves as expected.
Note: We may choose to improve the string representation at some point
"""
v = DefaultVetter()
v_str = str(v)
# No metrics gets returned as an empty dictionary
assert v_str == "{}", v_str
# A metrics dictionary gets returned as a pprinted string
v.metrics = dict(key="value")
v_str = str(v)
assert v_str == "{'key': 'value'}", v_str
w = ModifiedVetter()
w_str = str(w)
expected = "<test_vetters.ModifiedVetter"
assert w_str.startswith(expected), w_str
| 5,351,110 |
def run_workflow_tasks(
context,
tasks,
config_name
):
"""Run given list of workflow tasks with provided configuration within given
`invoke` context.
:param context: Invoke context.
:type context: Context
:param tasks: List of workflow tasks to run.
:type tasks: list[str]
    :param config_name: Configuration name, i.e. file name of the
        configuration without the `.json` extension.
:type config_name: str
"""
timestamp = time.time()
for t in tasks:
cmd = f'src/tasks/{t}.py {config_name} --timestamp {timestamp}'
print(f'Running: {cmd}')
success = run_py(context, cmd)
if not success:
print('Terminated!')
break
| 5,351,111 |
def get_latest_revision_number(request, package_id):
""" returns the latest revision number for given package """
package = get_object_or_404(Package, id_number=package_id)
return HttpResponse(simplejson.dumps({
'revision_number': package.latest.revision_number}))
| 5,351,112 |
def _make_mesh_tensors(inputs: Mapping[K, np.ndarray]) -> Mapping[K, tf.Tensor]:
"""
Computes tensors that are the Cartesian product of the inputs.
This is around 20x faster than constructing this in Python.
Args:
inputs: A mapping from keys to NumPy arrays.
Returns:
A mapping from keys to a Tensor, which takes on values from the corresponding
input NumPy array. Computing the Tensors should yield NumPy arrays equal to
`{k: itertools.product(inputs.values())[i] for i, k in enumerate(inputs.keys())}`.
"""
# SOMEDAY(adam): messy, this would be much nicer in TF2 API
# SOMEDAY(adam): v.dtype may not always match the dtype expected by the models.
# e.g. `stable_baselines.common.input` always maps `MultiDiscrete` to `int32` even though
# Gym reports it as `int64`. The dtypes match with `Box` though, which is the only thing we
# need so far, so ignoring this (change should possibly be made in Stable Baselines).
phs = {k: tf.placeholder(v.dtype, shape=v.shape) for k, v in inputs.items()}
# Increase dimensions for broadcasting
# So first tensor will be a[:, None, ..., None],
# second tensor b[None, :, None, ..., None], ...,
# final tensor z[None, ..., None, :].
tensors = {}
for i, (k, ph) in enumerate(phs.items()):
t = ph
for j in range(len(phs)):
if i != j:
t = tf.expand_dims(t, axis=j)
tensors[k] = t
target_shape = tuple((len(v) for v in inputs.values()))
tensors = {
k: tf.broadcast_to(t, target_shape + inputs[k].shape[1:]) for k, t in tensors.items()
}
target_len = np.product(target_shape)
tensors = {k: tf.reshape(t, (target_len,) + inputs[k].shape[1:]) for k, t in tensors.items()}
handles = {k: tf.get_session_handle(t) for k, t in tensors.items()}
feed_dict = {ph: inputs[k] for k, ph in phs.items()}
return tf.get_default_session().run(handles, feed_dict=feed_dict)
| 5,351,113 |
def main():
"""
You should write your code to make Karel do its task in
this function. Make sure to delete the 'pass' line before
starting to write your own code. You should also delete this
comment and replace it with a better, more descriptive one.
"""
while front_is_clear():
repair_damage()
if front_is_blocked():
last_repair()
| 5,351,114 |
def make_counters():
"""Creates all of the VariantCounters we want to track."""
def _gt_selector(*gt_types):
return lambda v: variantutils.genotype_type(v) in gt_types
return VariantCounters([
('All', lambda v: True),
('SNPs', variantutils.is_snp),
('Indels', variantutils.is_indel),
('BiAllelic', variantutils.is_biallelic),
('MultiAllelic', variantutils.is_multiallelic),
('HomRef', _gt_selector(variantutils.GenotypeType.hom_ref)),
('Het', _gt_selector(variantutils.GenotypeType.het)),
('HomAlt', _gt_selector(variantutils.GenotypeType.hom_var)),
('NonRef',
_gt_selector(variantutils.GenotypeType.het,
variantutils.GenotypeType.hom_var)),
])
| 5,351,115 |
def curse(
snapshot: PathLike,
output_zip: PathLike,
packmodes=None,
force=False,
mpm_filepath=None,
):
"""
Creates a .zip of the same format as curse/twitch that can be used to do a fresh install
of the pack. This will *not* contain mods, but creates a manifest that list them (same
thing curse does)
Arguments
snapshot -- snapshot file generated by 'mpm snapshot'
output_file -- path to the output file to produce
packmodes -- list of packmodes to include into the created .zip. Defaults to everything
force -- erase output_zip if it already exists
mpm_filepath -- if provided, bundle this pack manager into the .zip, so that it is part of the pack.
The argument must be the path to the "mpm.py" file
"""
# File checks and opening
snapshot = Path(snapshot)
output_zip = Path(output_zip)
if not snapshot.is_file() or not zipfile.is_zipfile(snapshot):
        raise zipfile.BadZipFile("%s is not a zip file" % snapshot)
with tempfile.TemporaryDirectory(dir=".") as pack_dir:
pack_dir = Path(pack_dir)
with zipfile.ZipFile(snapshot) as zf:
LOGGER.info("Decompressing snapshot in %s", pack_dir)
zf.extractall(pack_dir)
common.check_snapshot_dir(pack_dir)
# Read manifest
pack_manifest = manifest.pack.read_from(pack_dir)
curse_manifest = manifest.curse.read(pack_dir / "manifest.json")
# Check packmodes
if packmodes:
manifest.pack.check_packmodes(pack_manifest["packmodes"], packmodes)
# Open zip archive
LOGGER.info("Creating output .zip file")
archive = zipfile.ZipFile(
output_zip,
mode="w" if force else "x",
compression=zipfile.ZIP_DEFLATED,
compresslevel=6,
)
# Compute mods
if not packmodes:
LOGGER.info("No 'packmodes' argument, using all packmodes")
selected_mods = pack_manifest["mods"]
else:
selected_mods = manifest.pack.get_selected_mods(pack_manifest, packmodes)
LOGGER.debug(
"Selected mods:\n%s",
common.format_modlist(selected_mods, print_version=True),
)
# Create new manifest
LOGGER.info("Generating new manifest with selected mods")
curse_manifest["files"] = [
{"projectID": mod["addonID"], "fileID": mod["fileID"], "required": True}
for mod in selected_mods
]
curse_manifest["version"] = str(pack_manifest["pack-version"])
curse_manifest["overrides"] = "overrides"
with archive.open("manifest.json", mode="w") as f:
f.write(json.dumps(curse_manifest, indent=4).encode("utf-8"))
# Compute overrides
if not packmodes:
LOGGER.info("No 'packmodes' argument, using all overrides")
selected_overrides = pack_manifest["override-cache"]
else:
selected_overrides = manifest.pack.get_selected_overrides(
pack_manifest, packmodes
)
LOGGER.debug(
"Selected overrides:\n - %s", "\n - ".join(selected_overrides.keys()),
)
# Compress overrides
add_overrides(
archive=archive,
overrides=selected_overrides,
from_=pack_dir / "overrides",
to=PurePath("overrides"),
)
# Includes extra files (e.g. modlist)
LOGGER.info("Adding extra modpack files to .zip archives")
for extra in pack_dir.glob("*"):
if extra.is_file() and extra.name not in (
"manifest.json",
"pack-manifest.json",
):
archive.write(filename=extra, arcname=extra.relative_to(pack_dir))
elif extra.is_dir() and extra.stem != "overrides":
for sub_extra in extra.rglob("*"):
archive.write(
filename=sub_extra, arcname=sub_extra.relative_to(pack_dir)
)
## Include MPM
if mpm_filepath is not None:
LOGGER.info("Adding mpm to .zip archive")
mpm_filepath = Path(mpm_filepath)
arcname_root = PurePath("overrides/mpm")
LOGGER.debug("Adding pack-manifest.json")
new_manifest = manifest.pack.copy(
pack_manifest,
current_packmodes=list(packmodes)
if packmodes
else list(pack_manifest["packmodes"].keys()),
)
with archive.open("overrides/pack-manifest.json", mode="w") as fp:
manifest.pack.dump(new_manifest, fp)
LOGGER.debug("Adding %s", mpm_filepath.name)
archive.write(
filename=mpm_filepath, arcname=arcname_root / mpm_filepath.name
)
LOGGER.debug("Adding %s", "requirements.txt")
archive.write(
filename=mpm_filepath.parent / "requirements.txt",
arcname=arcname_root / "requirements.txt",
)
for source_file in _filelist.MPM_SRC_FILES:
LOGGER.debug("Adding %s", source_file.relative_to(mpm_filepath.parent))
archive.write(
filename=source_file,
arcname=arcname_root / source_file.relative_to(mpm_filepath.parent),
)
archive.close()
LOGGER.debug("Cleaning up temporary dir")
LOGGER.info("Done !")
| 5,351,116 |
def connect_registry_client():
"""
connect the module client for the Registry implementation we're using return the client object
"""
client = adapters.RegistryClient()
client.connect(environment.service_connection_string)
return client
| 5,351,117 |
def get_group_average_score(gid=None, name=None):
"""
Get the average score of teams in a group.
Args:
gid: The group id
name: The group name
Returns:
The total score of the group
"""
group_scores = get_group_scores(gid=gid, name=name)
total_score = sum([entry['score'] for entry in group_scores])
return int(total_score / len(group_scores)) if len(group_scores) > 0 else 0
| 5,351,118 |
def delete_transport_entry(sender, instance, **kwargs):
"""Delete a transport entry."""
tr_models.Transport.objects.filter(
pattern="autoreply.{}".format(instance)).delete()
| 5,351,119 |
def solve_with_cdd_for_II(A, verbose=False):
"""This method finds II's minmax strategy for zero-sum game A"""
m = A.shape[0] # number of rows
n = A.shape[1] # number of columns
A = np.column_stack([[0]*m,-A,[1]*m])
I = np.eye(n)
nn = np.column_stack([[0]*n,I,[0]*n])
# non-negativity constraints
    n1 = [-1] * n
    n1.insert(0,1)
    n1.append(0)  # n1 = [1,-1,-1,...,-1,0]
    n2 = [1] * n
    n2.insert(0,-1)
    n2.append(0)  # n2 = [-1,1,1,...,1,0]
d = np.vstack([A,nn,n1,n2])
mat = cdd.Matrix(d.tolist(), number_type='fraction')
mat.obj_type = cdd.LPObjType.MIN
d = [0] * (n+1)
d.append(1) # [0,0,...0,1]
mat.obj_func = d
lp = cdd.LinProg(mat)
lp.solve()
    assert lp.status == cdd.LPStatusType.OPTIMAL
# lp.primal_solution uses fractions, and has value as last entry, so that
# is dropped
p = [float(val) for val in lp.primal_solution[:-1]]
u = float(lp.obj_value)
if verbose:
print("------ Solved with cdd -------------")
print("Optimal strategy:", p)
print("Optimal payoff:", -u)
print("------------------------------------")
return p, -u
| 5,351,120 |
def build_none() -> KeySetNone:
"""Returns NONE."""
return KeySetNone()
| 5,351,121 |
def load_csv(file, shape=None, normalize=False):
"""
Load CSV file.
:param file: CSV file.
:type file: file like object
:param shape : data array is reshape to this shape.
:type shape: tuple of int
:return: numpy array
"""
value_list = []
for row in csv.reader(file):
        value_list.append(list(map(float, row)))
if shape is None:
return numpy.array(value_list)
else:
return numpy.array(value_list).reshape(shape)
| 5,351,122 |
def flip_around_axis(
coords: np.ndarray,
axis: Tuple[float, float, float] = (0.2, 0.2, 0.2)
) -> np.ndarray:
"""Flips coordinates randomly w.r.t. each axis with its associated probability."""
for col in range(3):
if np.random.binomial(1, axis[col]):
coords[:, col] = np.negative(coords[:, col])
return coords
| 5,351,123 |
def get_tc(name):
"""Determine the amount of tile columns to use."""
args = ["ffprobe", "-hide_banner", "-select_streams", "v", "-show_streams", name]
proc = sp.run(args, text=True, stdout=sp.PIPE, stderr=sp.DEVNULL)
lines = proc.stdout.splitlines()
d = {}
for ln in lines[1:-1]:
key, value = ln.strip().split("=")
d[key] = value
width = d["width"]
return math.floor(math.log2(math.ceil(float(width) / 64.0)))
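A worked example of the arithmetic above, assuming ffprobe reports a width of 1920:
import math

width = 1920                                    # assumed probe result
tile_cols = math.floor(math.log2(math.ceil(width / 64.0)))
# ceil(1920 / 64) = 30, log2(30) is about 4.91, floor gives 4 tile columns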
| 5,351,124 |
def get_my_nodes():
"""Get nodes assigned to this host for computation
"""
if not os.path.exists("/etc/cluster-hosts"):
raise Exception("No cluster hosts specified")
#grab list of hosts in cluster, in order
hosts = []
with open("/etc/cluster-hosts", "r") as fp:
for line in fp:
hosts.append(line.strip())
d = Differ()
diffnodes = list(d.get_nodes())
#compute node->host assignments (round-robin)
assigns = dict()
dx = 0
for item in diffnodes:
assigns[item] = hosts[dx % len(hosts)]
dx += 1
myitems = []
fqdn = socket.getfqdn()
for (item, host) in assigns.items():
if host == fqdn:
myitems.append(item)
return myitems
| 5,351,125 |
def register_final_images(folder, gene='Nuclei',
sub_pic_frac=0.2, use_MPI=False,
apply_to_corners=True, apply_warping = False,
region=None, compare_in_seq=False):
"""Register stitched images an in all HDF5 file in the folder
Loops the hybridizations in the HDF5 file, takes the stitched
images as indicated by gene and then compares each image to the
first image.
For the comparison only a small patch of the images is used, the
size of this patch can be controlled with "sub_pic_frac".
Parameters:
-----------
folder: str
The name of the folder containing the pickled file with stitching data,
needs a trailing slash ("/").
gene: str
The gene of which the stitched images are present and should be realigned.
Typically this will be 'Nuclei', because the smFISH genes will not have
enough signal to align the pictures properly. (Default: 'Nuclei')
sub_pic_frac: float
The fraction of the size of the original image that should be used to compare
images. (Default: 0.2)
use_MPI: bool
If True open the files in MPI friendly mode, if False open files in normal
single processing mode. (Default: False)
apply_to_corners: bool
Determines if the found registration will be applied to the tile
corners in the pickled stitching data file. (Default: True)
apply_warping: bool
Determines if the found registration will be applied as a warp to the
final pictures in the hdf5 file, should not be used with large datasets.
(Default: False)
region: list
List of length four containing ints. The region that should be compared to determine
the shift needed for registration. Should be in the order: [y_min, y_max, x_min,
x_max]. When region is defined, sub_pic_frac will not be used.
By default the code will determine the region itself taking a area around the
center of the image with a size determined by sub_pic_frac(Default: None)
compare_in_seq: bool
Determines if we should compare images in sequence or if we should compare
all to the first image.
"""
if not compare_in_seq:
file_name_list, file_1, im_file_1, trans, old_size_list, \
max_trans = \
prepare_for_comparing(folder, gene, compare_in_seq,
use_MPI=use_MPI)
# Compare each file to file 1:
for i in range(1, len(file_name_list)):
cur_trans, max_trans, cur_old_size, file_ind = \
get_single_trans(file_name_list, i, gene, im_file_1,
max_trans, sub_pic_frac=sub_pic_frac,
region=region, use_MPI=use_MPI)
trans[file_ind, :] = cur_trans
old_size_list[file_ind, :] = cur_old_size
# Close the hdf5 file.
file_1.close()
trans, new_size = correct_trans_and_size(trans,
old_size_list,
max_trans,
compare_in_seq)
else:
file_name_list, trans_relative, old_size_list, max_trans = \
prepare_for_comparing(folder, gene, compare_in_seq,
use_MPI=use_MPI)
# Compare each file to previous file:
for i in range(1, len(file_name_list)):
cur_trans, max_trans, cur_old_size, file_ind = \
get_single_relative_trans(file_name_list, i, gene,
max_trans,
sub_pic_frac = sub_pic_frac,
region = region,
use_MPI = use_MPI)
trans_relative[file_ind, :] = cur_trans
old_size_list[file_ind, :] = cur_old_size
trans, new_size = correct_trans_and_size(trans_relative,
old_size_list,
max_trans,
compare_in_seq)
logger.debug(
'Files: {} Translations: {}'
.format(file_name_list, trans))
# Apply the translations
for i in range(len(file_name_list)):
if apply_warping:
if use_MPI:
file_n = h5py.File(file_name_list[i], 'r+',
driver='mpio', comm=MPI.COMM_WORLD)
else:
file_n = h5py.File(file_name_list[i], 'r+')
im_file_n = file_n[gene]['StitchedImage']
transform_final_image(im_file_n, trans[i, :], new_size)
file_n.close()
if apply_to_corners:
data_name = (
os.path.split(file_name_list[i])[1].split(sep='.')[0]
+ '_' + gene
+ '_stitching_data')
transform_data_file(folder, data_name, trans[i, :],
new_size)
| 5,351,126 |
def cmap_hex_color(cmap, i):
"""
Convert a Colormap to hex color.
Parameters
----------
cmap : matplotlib.colors.ListedColormap
Represents the Colormap.
i : int
List color index.
Returns
-------
String
Represents corresponding hex string.
"""
return matplotlib.colors.rgb2hex(cmap(i))
| 5,351,127 |
def train_folds(X, y, fold_count, batch_size, get_model_func):
""" K-Fold Cross-Validation for Keras Models
Inspired by PavelOstyakov
https://github.com/PavelOstyakov/toxic/blob/master/toxic/train_utils.py
"""
fold_size = len(X[0]) // fold_count
models = []
for fold_id in range(0, fold_count):
print('===== FOLD {} ====='.format(fold_id+1))
model = get_model_func()
model.compile()
RocAuc = RocAucEvaluation()
RocAuc.set_model(model)
model.fit(
X, y, validation_split=max(1/fold_count, 0.15),
batch_size=batch_size, epochs=20, shuffle=True,
add_callbacks=[RocAuc], verbose=1
)
models.append(model)
return models
| 5,351,128 |
def reset_password(request):
"""
View to handle password reset
"""
helper.log_entrace(logger,request)
postEmail = request.POST.get('email', '')
try:
import socket
user = User.objects.get(email = postEmail)
import os, random, string
chars = string.ascii_letters + string.digits
random.seed = os.urandom(1024)
new_password = ''.join(random.choice(chars) for i in range(EPA_DEFAULT_PASSWORD_LENGTH))
user.set_password(new_password)
try:
send_mail(EPA_FORGOT_PASSWORD_EMAIL_TITLE, EPA_FORGOT_PASSWORD_EMAIL_BODY_TEMPLATE.replace('%username%', user.username)
.replace('%new_password%', new_password), EPA_FORGOT_PASSWORD_EMAIL_SENDER, [user.email])
user.save()
return HttpResponse(json.dumps([{'status': 'ok'}]))
except socket.error:
return HttpResponseServerError(json.dumps({'messages': ['Fail while try connecting to mail server']}))
except (ObjectDoesNotExist, DatabaseError) as e:
res = {"messages": ['User Not found']}
        logger.error('Error in reset_password: ' + str(e.__class__) + ' ' + str(e))
return HttpResponseNotFound(json.dumps(res))
| 5,351,129 |
def test_get_only_value(test: test_pair):
"""Test function `get_only_value`."""
original = copy.deepcopy(test.input)
result = get_only_value(test.input)
assert original == test.input
assert result == test.expected
| 5,351,130 |
async def cancel(command: HALCommandType, script: str):
"""Cancels the execution of a script."""
try:
await command.actor.helpers.scripts.cancel(script)
except Exception as err:
command.warning(text=f"Error found while trying to cancel {script}.")
return command.fail(error=err)
return command.finish(f"Script {script} has been scheduled for cancellation.")
| 5,351,131 |
def get_nodes_by_betweenness_centrality(query_id, node_number):
"""Get a list of nodes with the top betweenness-centrality.
---
tags:
- query
parameters:
- name: query_id
in: path
description: The database query identifier
required: true
type: integer
- name: node_number
in: path
description: The number of top between-nodes to return
required: true
type: integer
"""
graph = manager.cu_get_graph_from_query_id_or_404(query_id)
if node_number > graph.number_of_nodes():
node_number = graph.number_of_nodes()
bw_dict = nx.betweenness_centrality(graph)
return jsonify([
node.md5
for node, score in sorted(bw_dict.items(), key=itemgetter(1), reverse=True)[:node_number]
])
| 5,351,132 |
def dump(data, path):
"""
Serialize data dict and write to file given by path where serialization is
given by path's extension of either JSON, MsgPack, or CBOR for extension
.json, .mgpk, or .cbor respectively
"""
if ' ' in path:
raise IOError(f"Invalid file path '{path}' contains space.")
root, ext = os.path.splitext(path)
if ext == '.json':
with ocfn(path, "w+b") as f:
json.dump(data, f, indent=2)
f.flush()
os.fsync(f.fileno())
elif ext == '.mgpk':
with ocfn(path, "w+b") as f:
msgpack.dump(data, f)
f.flush()
os.fsync(f.fileno())
elif ext == '.cbor':
with ocfn(path, "w+b") as f:
cbor.dump(data, f)
f.flush()
os.fsync(f.fileno())
else:
raise IOError(f"Invalid file path ext '{path}' "
f"not '.json', '.mgpk', or 'cbor'.")
| 5,351,133 |
def preprocess(function):
"""
Converts a given function from type str to a Sympy object.
Keyword arguments:
function -- a string type representation of the user's math function
"""
import sympy
expr = function
while True:
if '^' in expr:
expr = expr[:expr.index('^')] + '**' + expr[expr.index('^')+1:]
else:
break
expr = sympy.sympify(expr)
return expr
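For example, the caret rewrite turns a user string into a Sympy expression:
expr = preprocess("x^2 + 3*x^4")
# The string becomes "x**2 + 3*x**4" before sympify, so expr equals sympy.sympify("x**2 + 3*x**4")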
| 5,351,134 |
def tot_changes(changes: str) -> int:
"""Add deletions and insertions."""
insertions_pat = re.compile(r"(\d+) insertion")
deletions_pat = re.compile(r"(\d+) deletion")
insertions = insertions_pat.search(changes)
insertions = int(insertions.group(1)) if insertions else 0
deletions = deletions_pat.search(changes)
deletions = int(deletions.group(1)) if deletions else 0
return insertions + deletions
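For example, with a `git diff --shortstat`-style summary (the string below is illustrative):
summary = " 3 files changed, 10 insertions(+), 2 deletions(-)"
assert tot_changes(summary) == 12    # 10 insertions + 2 deletions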
| 5,351,135 |
def update_from_mcd(full_table, update_table):
# type: (pd.DataFrame, pd.DataFrame) -> pd.DataFrame
"""
Update the full table (aka the PDG extended-style table) with the
up-to-date information from the PDG .mcd file.
Example
-------
>>> new_table = update_from_mcd('mass_width_2008.fwf', 'mass_width_2021.mcd') # doctest: +SKIP
"""
full_table = full_table.copy()
full_table.update(update_table)
update_table_neg = update_table.copy()
update_table_neg.index = -update_table_neg.index
full_table.update(update_table_neg)
return full_table
| 5,351,136 |
def resolve_hostname(host):
"""Get IP address of hostname or URL."""
try:
parsed = urlparse.urlparse(host)
except AttributeError as err:
error = "Hostname `%s`is unparseable. Error: %s" % (host, err)
LOG.exception(error)
raise errors.SatoriInvalidNetloc(error)
# Domain names are in netloc, IP addresses fall into path
hostname = parsed.netloc or parsed.path
# socket.gaierror is not trapped here
address = socket.gethostbyname(hostname)
return address
| 5,351,137 |
def DeconRNASeq_main(rna_df, sig_df, patient_IDs='ALL', args={}):
"""
This function does the following:
- parses the dictionary 'args' for the arguments to pass on to the DeconRNASeq method.
- eliminates genes from rna_df and sig_df that are not present in both data sets
    - Runs DeconRNASeq() for each patient specified in the 'patient_IDs' argument
- Combines the resulting frequencies into a pandas dataframe (num_celltypes x num_patients)
Inputs:
- rna_df: pandas df of rna gene expression data.
Rows are genes (indexed by 'Hugo_Symbol') and columns are patients
- sig_df: pandas df of Signature gene expression values for given cell types.
Rows are genes (indexed by 'Hugo_Symbol') and columns are cell types
- patient_IDs: list of patient IDs to run DeconRNASeq for.
Alternatively, can use the string 'ALL' to run for all patients
- args: dictionary containing any of the following:
- check_sig: boolean, whether or not to check the condition number of the signature matrix
- scaling: string, must be either 'None', 'zscore', or 'minmax'. Determines how to scale the signature matrix and mixture data before solving for optimal x
- scaling_axis: 0 or 1. Whether to scale mixture data and signature matrix by normalizing each column (celltype/patient) separately (scaling_axis=0) or each row (gene) separately (scaling_axis=1).
- formulation: see DeconRNASeq()
- reg_constant: see DeconRNASeq()
- print_result: see DeconRNASeq()
Outputs:
- cell_freqs: pandas df. Contains cell type frequencies for each patient in 'patient_IDs' list.
Rows are indexed by cell type, columns are patient IDs
"""
import pandas as pd
import numpy as np
from .data_utils import keep_common_genes
from .data_utils import df_normalization
# Read in optional arguments, or set them as default
# Assert values are of the right data type when passed to DeconRNASeq() function.
# formulation must be 'qp', 'ridge', or 'lasso'
if 'formulation' in args.keys():
formulation = args['formulation']
if formulation not in ['qp','ridge','lasso']:
raise ValueError("Formulation ({!r}) must be set to 'qp', 'ridge', or 'lasso'".format(formulation))
else:
formulation = 'qp'
# reg_constant must be a double
if 'reg_constant' in args.keys():
reg_constant = args['reg_constant']
else:
reg_constant = 1.0
if 'check_sig' in args.keys():
check_sig = args['check_sig']
if not isinstance(check_sig, bool):
raise ValueError("check_sig ({!r}) must be a boolean variable".format(check_sig))
else:
check_sig = False
if 'scaling' in args.keys():
scaling = args['scaling']
if scaling not in ['None', 'none', 'zscore', 'minmax', 'r-zscore']:
raise ValueError("scaling ({!r}) must be set to 'none', 'zscore' or 'minmax'".format(scaling))
else:
scaling = 'minmax'
if 'scaling_axis' in args.keys():
scaling_axis = args['scaling_axis']
if scaling_axis not in [0, 1]:
raise ValueError("scaling_axis ({!r}) must be 0 or 1".format(scaling_axis))
else:
scaling_axis = 0
if 'print_results' in args.keys():
print_results = args['print_results']
if not isinstance(print_results, bool):
raise ValueError("print_results ({!r}) must be a boolean variable".format(print_results))
else:
print_results = False
# eliminate genes not present in both rna and sig dfs, and ensure they are in the same order:
rna_df, sig_df = keep_common_genes(rna_df, sig_df)
# Scale Data:
if scaling in ['zscore', 'minmax', 'r-zscore']:
# R implementation uses zscore scaling.
sig_df = df_normalization(sig_df, scaling=scaling, axis=scaling_axis)
rna_df = df_normalization(rna_df, scaling=scaling, axis=scaling_axis)
# Convert signature to numpy array
Sig = np.array(sig_df)
# Check the condition number of the signature matrix:
if check_sig:
print("Condition number of signature matrix =", np.linalg.cond(Sig))
# Select a patient / list of patients to solve for their cell type frequencies:
# Patient_ID must be 'ALL' or an array of specific patient IDs.
if patient_IDs == 'ALL':
patient_list = rna_df.columns
elif not isinstance(patient_IDs, type([])):
raise ValueError("patient_IDs should be either 'ALL', or an array of IDs (not a single ID)")
else:
patient_list = patient_IDs
# For each patient, run DeconRNASeq to get cell type frequencies, and save results to pandas df:
print("Running DeconRNASeq...")
cell_freqs_df = pd.DataFrame()
cell_freqs_df['Patient_ID'] = sig_df.columns
cell_freqs_df = cell_freqs_df.set_index(['Patient_ID'])
for patient in patient_list:
if patient in rna_df.columns:
Mix = np.array(rna_df[patient])
cell_freqs_df[patient] = DeconRNASeq(Sig, Mix, formulation=formulation, reg_constant=reg_constant, print_results=print_results, label=patient)
else:
raise ValueError("patient_ID ({!r}) not present in rna dataframe".format(patient))
cell_freqs_df = cell_freqs_df.transpose()
return cell_freqs_df
| 5,351,138 |
def test_datetime_to_isoformat():
"""Test the ``datetime_to_isoformat`` function."""
assert timezone.datetime_to_isoformat(None) is None
assert isinstance(timezone.datetime_to_isoformat(timezone.now()), str)
| 5,351,139 |
def parse_c_interface(c_interface_file):
"""
@brief Parses a c-interface file and generates a dictionary of function names to parameter lists.
Exported functions are expected to be preceded by 'DLL_EXPORT'. Python keywords should not be used as variable
names for the function names in the cpp-interface file. If a Python wrapper function shall return the output buffer,
the corresponding parameter has to be preceded by the _OUT_BUFFER_KEYWORD in the C++ file. In this case, we assume
the parameter is a numpy array. The shape and the dtype will be taken from the first input parameter.
"""
_OUT_BUFFER_KEYWORD = "OUT"
with open(c_interface_file, "r") as f:
# read file and remove comments
content = "\n".join([c.split("//")[0] for c in re.sub("/\*.*?\*/", "", f.read(), flags=re.DOTALL).split("\n")])
function_signatures = [x for x in re.findall("DLL_EXPORT.+?\)", content, flags=re.DOTALL)]
function_dict = OrderedDict()
for sig in function_signatures:
params_regex = re.compile("\(.*?\)", flags=re.DOTALL)
# find function name
wo_params = re.sub(params_regex, "", sig)
tokens = re.split("\s", wo_params)
name = tokens[-1]
function_dict[name] = dict()
# find return type and initialize dict
function_dict[name] = {"restype": " ".join(tokens[1:-1]), "params": [], "out_buffers": []}
# find parameters, remove template specifiers, and split at commas
param_fields = re.sub("<.*?>", "", re.search(params_regex, sig).group(0)[1:-1]).split(",")
out_buffer_indices = [i for i, s in enumerate(param_fields)
if _OUT_BUFFER_KEYWORD in [x.strip() for x in s.split(" ")]]
name_position = -1 # last position in C++ should contain the name of the variable
try:
all_parameters = [re.search("[A-Za-z0-9_]+", x[name_position].strip()).group(0)
for x in (re.split("\s", s) for s in param_fields)]
for i, p in enumerate(all_parameters):
if i in out_buffer_indices:
function_dict[name]["out_buffers"].append(p)
else:
function_dict[name]["params"].append(p)
except AttributeError:
pass
return function_dict
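A sketch of the expected round trip, assuming a hypothetical header file whose only exported declaration is `DLL_EXPORT int process(OUT double* result, int n);`:
funcs = parse_c_interface("interface.h")    # hypothetical file
# funcs["process"] == {"restype": "int", "params": ["n"], "out_buffers": ["result"]}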
| 5,351,140 |
def con_minimize(fun, bounds, constr=(), x0=None, args=(),
callback=None, options={}, workers=None):
"""Constrained minimization of `fun` using Genetic Algorithm.
This function is a wrapper over modetga.minimize().
The constraints are defined as a tuple of functions
(`fcon1(x, *args)`, `fcon2(x, *args)`, `...`).
The algorithm searches for a solution minimizing
`fun(x, *args)` and satisfying the conditions
(`fcon1(x, *args) >= 0`, `fcon2(x, *args) >= 0`, `...`).
`callback` arguments: `x`, `fx`, `ng`, `*args`.
`fx` is the function value at the generation `ng`.
Returns an optimization result object with the following attributes:
- x - numpy 1D array, optimized parameters,
- message - str, exit message,
- ng - int, number of generations,
- fx - float, final function value.
:param fun: function to be minimized
:param bounds: tuple, parameter bounds
:param constr: tuple, functions defining constraints
:param x0: numpy 1D array, initial parameters
:param args: tuple, positional arguments to be passed to `fun` and to `fcon`
:param callback: function, called after every generation
:param options: dict, GA options
:param workers: int, number of processes to use (will use all CPUs if None)
:return: OptRes, optimization result
"""
# Wrap cost function with constraints
def fun_soft_con(x, *augmented_args):
# Unpack constraints and arguments
fcore = augmented_args[0] # Function to be minimized
fcons = augmented_args[1] # Constraints
user_args = augmented_args[2:] # Arguments
# Evaluate core function
ycore = fcore(x, *user_args)
# Initialize penalty
penalty = 0.
# Update penalty
# (the more negative fcon() is, the higher penalty)
for f in fcons:
ycon = np.max([f(x, *user_args) * -1., 0.])
pscale = ycore / (ycon + 1e-6)
penalty += ycon * pscale
return ycore + penalty
# Run minimization
augmented_args = (fun, constr, *args)
res = minimize(
fun=fun_soft_con,
bounds=bounds,
x0=x0,
args=augmented_args,
callback=callback,
options=options,
workers=workers)
# Extend result with contraint violation info
res.constr = [fcon(res.x, *args) for fcon in constr]
return res
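A minimal usage sketch, assuming the wrapped `minimize()` GA is importable: minimize x0^2 + x1^2 subject to x0 + x1 >= 1.
def cost(x):
    return x[0] ** 2 + x[1] ** 2

def con(x):
    return x[0] + x[1] - 1.0    # feasible when >= 0

res = con_minimize(cost, bounds=((-2.0, 2.0), (-2.0, 2.0)), constr=(con,))
# res.x is expected near [0.5, 0.5]; res.constr reports the final constraint values (GA output is approximate).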
| 5,351,141 |
def unique_v2(lst):
"""
Returns a list of all unique elements in the input list "lst."
    This algorithm runs in O(n), as it only passes through the list "lst" twice
"""
dd = defaultdict(int) # avoids blank dictionary problem (KeyError when accessing nonexistent entries)
unique_list = []
for val in lst:
dd[val] += 1
for val in lst:
if dd[val] == 1:
unique_list.append(val)
return unique_list
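For example, only the elements that occur exactly once survive, in their original order:
assert unique_v2([3, 1, 4, 1, 5, 9, 3]) == [4, 5, 9]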
| 5,351,142 |
def is_ip_network(network, strict=False):
"""Returns True/False if a string is a valid network."""
network = str(network)
try:
ipaddress.ip_network(network, strict)
return True
except ValueError:
return False
| 5,351,143 |
def assign_point_of_contact(point_of_contact):
"""
Assign a user to be the point of contact in emails/letters
:param point_of_contact: A string containing the user_guid if point of contact has been set for a request
:return: A User object to be designated as the point of contact for a request
"""
if point_of_contact:
return Users.query.filter(Users.guid == point_of_contact).one_or_none()
else:
return current_user
| 5,351,144 |
def response_with_pagination(guests, previous, nex, count):
"""
Make a http response for GuestList get requests.
:param count: Pagination Total
:param nex: Next page Url if it exists
:param previous: Previous page Url if it exists
:param guests: Guest
:return: Http Json response
"""
return make_response(jsonify({
'status': 'success',
'previous': previous,
'next': nex,
'count': count,
'guests': guests
})), 200
| 5,351,145 |
def put_object(request, old_pid):
"""MNStorage.update(session, pid, object, newPid, sysmeta) → Identifier."""
if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
d1_gmn.app.auth.assert_create_update_delete_permission(request)
d1_gmn.app.util.coerce_put_post(request)
d1_gmn.app.views.assert_db.post_has_mime_parts(
request, (("field", "newPid"), ("file", "object"), ("file", "sysmeta"))
)
d1_gmn.app.views.assert_db.is_valid_pid_to_be_updated(old_pid)
sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES["sysmeta"])
new_pid = request.POST["newPid"]
d1_gmn.app.views.assert_sysmeta.matches_url_pid(sysmeta_pyxb, new_pid)
d1_gmn.app.views.assert_sysmeta.obsoletes_matches_pid_if_specified(
sysmeta_pyxb, old_pid
)
sysmeta_pyxb.obsoletes = old_pid
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, "seriesId")
d1_gmn.app.views.assert_sysmeta.is_valid_sid_for_chain(old_pid, sid)
d1_gmn.app.views.create.create_sciobj(request, sysmeta_pyxb)
# The create event for the new object is added in create_sciobj(). The update
# event on the old object is added here.
d1_gmn.app.event_log.log_update_event(
old_pid,
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
)
d1_gmn.app.sysmeta.update_modified_timestamp(old_pid)
return new_pid
| 5,351,146 |
def _match_gelu_pattern(gf, entry_node):
""" Return the nodes that form the subgraph of a GELU layer
"""
try:
if not len(entry_node.outputs) == 3:
return None
pow_1, add_2, mul_3 = [gf[x] for x in entry_node.outputs]
if not (pow_1.op == 'Pow' and add_2.op == 'Add' and mul_3.op == 'Mul'):
return None
const_4 = gf[pow_1.inputs[1]]
if not (const_4.op == 'Const' and int(round(const_4.value.val)) == 3):
return None
mul_5 = gf[pow_1.outputs[0]]
const_6 = gf[mul_5.inputs[0]]
if not (const_6.op == 'Const' and \
abs(const_6.value.val - 0.0447) < 1e-3):
return None
if not (gf[add_2.inputs[0]] == entry_node and \
gf[add_2.inputs[1]] == mul_5):
return None
mul_7 = gf[add_2.outputs[0]]
const_8 = gf[mul_7.inputs[0]]
if not abs(const_8.value.val - np.sqrt(2 / np.pi)) < 1e-3:
return None
tanh_9 = gf[mul_7.outputs[0]]
add_10 = gf[tanh_9.outputs[0]]
const_11 = gf[add_10.inputs[0]]
if not (tanh_9.op == 'Tanh' and add_10.op == 'Add' and \
const_11.op == 'Const' and int(round(const_11.value.val)) == 1):
return None
mul_12 = gf[add_10.outputs[0]]
const_13 = gf[mul_12.inputs[0]]
if not (mul_12.op == 'Mul' and const_13.op == 'Const' and \
abs(const_13.value.val - 0.5) < 1e-3):
return None
if not (gf[mul_3.inputs[0]] == entry_node and \
gf[mul_3.inputs[1]] == mul_12):
return None
gelu_nodes = [pow_1, add_2, mul_3, const_4, mul_5, const_6, mul_7,
const_8, tanh_9, add_10, const_11, mul_12, const_13]
return gelu_nodes
except:
return None
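For reference, the constants matched above (0.044715, sqrt(2/pi), 0.5) correspond to the tanh approximation of GELU; a plain NumPy rendering of that expression, for illustration only:
import numpy as np

def gelu_tanh_approx(x):
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))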
| 5,351,147 |
def assign_obs_error(param, param_val, truth_mag, band, run):
"""
Assign errors to Object catalog quantities
Returns
-------
obs_err : float or np.array
The error values in units defined in get_astrometric_error(), get_photometric_error
err_type : str
Type of observational error
"""
if param in ['ra_offset', 'dec_offset', 'Ixx_sqrt', 'Iyy_sqrt', 'x', 'y_obs',]:
obs_err = get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif param in ['Ixy', 'IxxPSF', 'IxyPSF', 'IyyPSF',]:
# \delta(x^2) = \delta(x) \times 2x
obs_err = 2.0*param_val*get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif 'Flux' in param: # flux columns
obs_err = get_photometric_error(truth_mag, band=band, run=run)
err_type = 'photometric'
elif param == 'extendedness':
obs_err = np.zeros_like(param_val)
err_type = 'N/A'
else:
raise NotImplementedError
return obs_err, err_type
| 5,351,148 |
def dir_frequency(dirname: str, amount=50) -> List[Tuple[str, int]]:
"""Pipeline of word_frequency from a directory of raw input file."""
md_list = md.collect_md_text(dirname)
return compute_frequency(tokenize(normalize(" ".join(md_list))), amount)
| 5,351,149 |
def test_solver1(N, version='scalar'):
"""
Very simple test case.
Store the solution at every N time level.
"""
def I(x): return sin(2*x*pi/L)
def f(x,t): return 0
solutions = []
# Need time_level_counter as global variable since
# it is assigned in the action function (that makes
# a variable local to that block otherwise).
# The manager class below provides a cleaner solution.
global time_level_counter
time_level_counter = 0
def action(u, t, x):
global time_level_counter
if time_level_counter % N == 0:
solutions.append(u.copy())
time_level_counter += 1
n = 100; tstop = 6; L = 10
dt, x, cpu = solver(I, f, 1.0, lambda t: 0, lambda t: 0,
L, n, 0, tstop,
user_action=action, version=version)
    print('CPU time:', cpu)
    print('Max value in final u:', arrmax(solutions[-1]))
| 5,351,150 |
def get_auth_data():
"""
Create auth data.
Returns:
return: access token and token expiring time.
"""
payload = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'client_credentials',
}
api_url = '{0}/oauth/access_token'.format(API_BASE_URL)
response = requests.post(url=api_url, data=payload)
response.raise_for_status()
auth_data = response.json()
return auth_data['access_token'], auth_data['expires_in']
| 5,351,151 |
def datatable(module, tag):
"""Mapping for DataTable."""
if tag == "DataTable":
return module, tag
| 5,351,152 |
def remove_prepending(seq):
"""
Method to remove prepending ASs from AS path.
"""
last_add = None
new_seq = []
for x in seq:
if last_add != x:
last_add = x
new_seq.append(x)
is_loopy = False
if len(set(seq)) != len(new_seq):
is_loopy = True
# raise Exception('Routing Loop: {}'.format(seq))
return new_seq, is_loopy
| 5,351,153 |
def github_handle_error(e):
"""
Handles an error from the Github API
an error example: Error in API call [401] - Unauthorized
{"message": "Bad credentials", "documentation_url": "https://docs.github.com/rest"}
The error might contain error_code, error_reason and error_message
The error_reason and error_message might be the same but usually, the error_reason adds more information that
the error_message doesn't provide
examples:
error_code = 401
error_message = 'Bad credentials'
error_reason = 'Unauthorized'
:param e: the client object
:return: error_code and error_message
"""
try:
error_code = ""
error_message = str(e)
if e.__class__ is DemistoException and e.res is not None:
error_res = e.res
if isinstance(error_res, dict):
error_code = str(error_res.get("status"))
error_message = str(error_res.get("detail"))
else:
error_code = e.res.status_code
if not e.res.ok:
if e.res.json():
error_message = error_res.json().get("message", "")
if not error_message:
error_message = error_res.json().get("detail", "")
error_reason = error_res.reason
if error_reason and error_reason != error_message:
error_message += f' {error_reason}'
return error_code, error_message
except Exception as e:
error_code = ""
error_message = str(e)
return error_code, error_message
| 5,351,154 |
def test_get_df_max_value():
"""
Test Command
------------
$ python run_tests.py --module_name plot_playground.tests.test_data_helper:test_get_df_max_value --skip_jupyter 1
"""
df = pd.DataFrame(data=[{
'a': 100,
'b': 200,
'c': 300,
}, {
'a': 50,
'b': 350,
'c': 80,
}])
max_value = data_helper.get_df_max_value(
df=df, columns=['a', 'c'])
assert_equal(max_value, 300)
| 5,351,155 |
def netmask_to_bits(net_mask):
""" Convert netmask to bits
Args:
net_mask ('str'): Net mask IP address
ex.) net_mask = '255.255.255.255'
Raise:
None
Returns:
Net mask bits
"""
return IPAddress(net_mask).netmask_bits()
| 5,351,156 |
def register_corrector(cls=None, *, name=None):
"""A decorator for registering corrector classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _CORRECTORS:
raise ValueError(f'Already registered model with name: {local_name}')
_CORRECTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
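Typical decorator usage (the corrector class below is a hypothetical placeholder):
@register_corrector(name='identity')
class IdentityCorrector:
    """Hypothetical corrector that returns its input unchanged."""
    def update_fn(self, x, t):
        return x

assert _CORRECTORS['identity'] is IdentityCorrector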
| 5,351,157 |
def write_video(stream):
""" Write the entire content of the circular buffer to disk. No need to
lock the stream here as we're definitely not writing to it simultaneously.
"""
logger.info("write_video")
with io.open("before.h264", "wb") as output:
for frame in stream.frames:
if frame.frame_type == picamera.PiVideoFrameType.sps_header:
stream.seek(frame.position)
break
while True:
buf = stream.read1()
if not buf:
break
output.write(buf)
# Wipe the circular stream once we're done
stream.seek(0)
stream.truncate()
| 5,351,158 |
def fetch(word):
"""given a single word, fix plural and singular - returning graph picks"""
pass
| 5,351,159 |
def _sample_perc_from_list(lst, perc=100, algorithm="cum_rand", random_state=None):
"""
Sample randomly a certain percentage of items from the given
list. The original order of the items is kept.
:param lst: list, shape = (n,), input items
:param perc: scalar, percentage to sample
:param algorithm: string, which algorithm should be used
"random": Decide for each item to be chosen or not. This
algorithm runs in linear time O(n), but
the percentages might not match exactly.
"cum_rand": O(n log(n) + perc)
:return: list
"""
if perc >= 100:
return lst
if perc <= 0:
return []
# Store old random state and set random state
rs_old = numpy.random.get_state()
numpy.random.seed(random_state)
if algorithm == "random":
lst_sub = [it for it in lst if numpy.random.uniform(high=100) <= perc]
elif algorithm == "cum_rand":
n = len(lst)
n_perc = numpy.round(n * perc / 100.0)
rank_its = numpy.argsort(numpy.random.uniform(size=n))
lst_sub = []
for idx, it in enumerate(lst):
if rank_its[idx] < n_perc:
lst_sub.append(it)
if len(lst_sub) > n_perc:
break
else:
raise ValueError("Invalid sampling algorithm: %s." % algorithm)
    # Restore old random state
numpy.random.set_state(rs_old)
return lst_sub
| 5,351,160 |
def irods_setacls(path, acl_list, verbose=False):
"""
This function will add the ACLs listed in 'acl_list'
to the collection or data object at 'path'.
'acl_list' is a list where each element itself is
a list consisting of the username in name#zone format,
and the access level ('read', 'write', 'own', or 'null').
Access type 'null' removes all ACLs for that user/group.
Note. On an error return, some of the ACLs might have
been applied. The function does not "roll back" on error.
Returns 0 on success, non-zero on error.
"""
if not path or not acl_list:
return 1
for acl in acl_list:
(rc, output) = shell_command(['ichmod', acl[1], acl[0], path])
if rc:
if verbose:
print("Error running 'ichmod %s %s %s': rc = %d:"
% (acl[1], acl[0], path, rc))
print output[1]
return rc
return 0
| 5,351,161 |
def add_years(date_to_change, years):
"""
Return a date that's `years` years after the date (or datetime)
object `date_to_change`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1).
Args:
date_to_change (date): The date that we're adding years to.
        years (int): The number of years to add.
    Returns:
        date: The provided date shifted forward by the given number of years.
"""
try:
return date_to_change.replace(year=date_to_change.year + years)
except ValueError:
return date_to_change + (
datetime.date(date_to_change.year + years, 1, 1)
- datetime.date(date_to_change.year, 1, 1)
)
| 5,351,162 |
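Doctest-style calls for `add_years` above, showing the February 29 fallback described in its docstring.
>>> import datetime
>>> add_years(datetime.date(2020, 2, 29), 1)
datetime.date(2021, 3, 1)
>>> add_years(datetime.date(2021, 5, 17), 3)
datetime.date(2024, 5, 17)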
def concurrency_update_done(client, function_name, qualifier):
"""wait fn for ProvisionedConcurrencyConfig 'Status'"""
def _concurrency_update_done():
status = client.get_provisioned_concurrency_config(
FunctionName=function_name, Qualifier=qualifier
)["Status"]
if status == "FAILED":
raise ShortCircuitWaitException(f"Concurrency update failed: {status=}")
else:
return status == "READY"
return _concurrency_update_done
| 5,351,163 |
def test_if_endless_max_rounds_is_valid(db, market_update_form_data):
""" If endless = True, max_rounds can be chosen smaller than current round of the market """
market = MarketFactory(round=8)
market_update_form_data['endless'] = True
market_update_form_data['max_rounds'] = 5
form = MarketUpdateForm(market_update_form_data, instance=market)
assert form.is_valid()
| 5,351,164 |
def googleapis_email(url, params):
"""Loads user data from googleapis service, only email so far as it's
described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
Parameters must be passed in queryset and Authorization header as described
on Google OAuth documentation at:
http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
"""
request = Request(url + '?' + params, headers={'Authorization': params})
try:
return simplejson.loads(dsa_urlopen(request).read())['data']
except (ValueError, KeyError, IOError):
return None
| 5,351,165 |
def _compose_query_string(ctx, query_string, **args):
"""
Return the SQL for an ad-hoc named query on the given context.
NOTE: This is a debug ONLY method, do NOT use this in production code.
"""
query = _construct_adhoc_query(ctx, query_string, **args)
wrapped_ctx = _CtxWrapper.wrap(ctx)
    assert wrapped_ctx.current_conn is not None
return query.sql(wrapped_ctx.current_conn, args, _debugging=True)
| 5,351,166 |
def get_img_full_path(path):
""" Checks if file can be found by path specified in the input. Returns the same as input
if can find, otherwise joins current directory full path with path from input and returns it.
:param path: Relative of full path to the image.
:return: Relative of full path to the image (joined with path to current directory if needed).
"""
if os.path.isfile(path):
return path
else:
directory = os.path.dirname(__file__)
new_path = os.path.join(directory, path)
if os.path.isfile(new_path):
return new_path
else:
raise IOError("File not found: " + path)
| 5,351,167 |
def max_pool_2x2(input_):
""" Perform max pool with 2x2 kelner"""
return tf.nn.max_pool(input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
| 5,351,168 |
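A minimal sketch of `max_pool_2x2` above, assuming a TensorFlow 1.x-style `tf` import and `numpy` imported as `np`; a 4x4 single-channel input is reduced to 2x2.
x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
pooled = max_pool_2x2(x)  # shape (1, 2, 2, 1); each value is the max of one 2x2 window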
def remove_numerals(df, remove_mixed_strings=True):
"""Removes rows from an ngram table with words that are numerals. This
does not include 4-digit numbers which are interpreted as years.
Arguments:
df {Pandas dataframe} -- A dataframe of with columns 'word', 'count'.
Keyword Arguments:
remove_mixed_strings {bool} -- Whether to remove rows with words that
are mixtures of numerals and letters. (default: {True})
"""
no_numerals_df = df.copy().reset_index()
for i, row in tqdm(no_numerals_df.iterrows(), desc="Removing numerals\n"):
word = row['word']
if remove_mixed_strings:
if any([c.isnumeric() for c in word]) and \
not is_year(word):
no_numerals_df.drop(i, axis=0, inplace=True)
else:
if word.isnumeric() and len(word) != 4:
no_numerals_df.drop(i, axis=0, inplace=True)
return no_numerals_df
| 5,351,169 |
def test_split_buffer_csi_parameter_no_intermediate():
"""Split based on CSI with parameters bytes but no intermediate bytes. Up to
3 bytes."""
csi_up_to_sgr = range(int('40', 16), int('6d', 16))
csi_above_sgr = range(int('6e', 16), int('7f', 16))
for char_id in itertools.chain(csi_up_to_sgr, csi_above_sgr):
for parameter in range(int('30', 16), int('40', 16)):
for count in range(1, 4):
code = chr(parameter) * count + chr(char_id)
data = 'Hello \x1b[{} World'.format(code)
expected = (('Hello ', '\x1b[' + code), (' World', ''))
assert repr(
chromaterm.__main__.split_buffer(data)) == repr(expected)
| 5,351,170 |
def freq_analysis_plot(sup_data, unsup_data_epoch1, unsup_data_epoch49, data_dict):
"""
    :param unsup_data_epoch1: Unsupervised data after 1 epoch (dtype: pandas dataframe)
    :param unsup_data_epoch49: Unsupervised data after 49 epochs (dtype: pandas dataframe)
:param sup_data: Supervised data(dtype:pandas dataframe)
:param data_dict: dictionary containing input instructions(dtype:dict)
Plots the frequency analysis plot and save it in data_dict["visualize"]["plot_directory"]
"""
sup = sup_data['POS'].to_dict()
sup_freq = Counter(sup.values())
sup_freq = dict(OrderedDict(sup_freq.most_common()))
unsup1 = unsup_data_epoch1['POS'].to_dict()
unsup1_freq = Counter(unsup1.values())
unsup1_freq = dict(OrderedDict(unsup1_freq.most_common()))
unsup49 = unsup_data_epoch49['POS'].to_dict()
unsup49_freq = Counter(unsup49.values())
unsup49_freq = dict(OrderedDict(unsup49_freq.most_common()))
unsup1_dataframe = pd.DataFrame.from_dict(unsup1_freq, orient='index', columns=['unsup-1'])
unsup49_dataframe = pd.DataFrame.from_dict(unsup49_freq, orient='index', columns=['unsup-49'])
sup_dataframe = pd.DataFrame.from_dict(sup_freq, orient='index', columns=['sup'])
unsup1_pos = list(unsup1_dataframe.index)
unsup1_pos_mass_activation = []
for pos in unsup1_pos:
temp = unsup_data_epoch1[unsup_data_epoch1['POS']==pos]
unsup1_pos_mass_activation.append(temp['max_activations'].sum())
sup_pos = list(sup_dataframe.index)
sup_pos_mass_activation = []
for pos in sup_pos:
temp = sup_data[sup_data['POS']==pos]
sup_pos_mass_activation.append(temp['max_activations'].sum())
unsup49_pos = list(unsup49_dataframe.index)
unsup49_pos_mass_activation = []
for pos in unsup49_pos:
temp = unsup_data_epoch49[unsup_data_epoch49['POS']==pos]
unsup49_pos_mass_activation.append(temp['max_activations'].sum())
unsup1_dataframe['unsup1-mass_activation'] = unsup1_pos_mass_activation
unsup49_dataframe['unsup49-mass_activation'] = unsup49_pos_mass_activation
sup_dataframe['sup-mass_activation'] = sup_pos_mass_activation
df = unsup1_dataframe.join(sup_dataframe)
df_ = df.join(unsup49_dataframe)
df_.sort_values(['unsup-1'],inplace=True,ascending=False)
df_['unsup corpus POS freq. %'] = df_['unsup-1'].apply(lambda x:x/df_['unsup-1'].sum())
df_['unsup epoch 1 act. mass %'] = df_['unsup1-mass_activation'].apply(lambda x:x/df_['unsup1-mass_activation'].sum())
df_['unsup epoch 49 act. mass %'] = df_['unsup49-mass_activation'].apply(lambda x:x/df_['unsup49-mass_activation'].sum())
plot_dict = df_[['unsup corpus POS freq. %','unsup epoch 1 act. mass %','unsup epoch 49 act. mass %']].to_dict()
fig = go.Figure()
fig.add_trace(go.Bar(x= list(plot_dict['unsup corpus POS freq. %'].keys()) ,
y= list(plot_dict['unsup corpus POS freq. %'].values()),
name="unsup POS freq. %", marker_color='black'))
fig.add_trace(go.Bar(x= list(plot_dict['unsup epoch 1 act. mass %'].keys()) ,
y= list(plot_dict['unsup epoch 1 act. mass %'].values()),
name="unsup epoch 1 act. mass %", marker_color='gray'))
fig.add_trace(go.Bar(x= list(plot_dict['unsup epoch 49 act. mass %'].keys()) ,
y= list(plot_dict['unsup epoch 49 act. mass %'].values()),
name="unsup epoch 49 act. mass %", marker_color=data_dict['visualize']['viz_colors']['unsup_epoch_49']))
fig.update_layout(barmode='relative',
title_text='% POS activations vs. % POS frequencies',
xaxis_title="POS tags",
yaxis_title="POS %",
)
# fig.write_image(os.path.join(data_dict["visualize"]["plot_directory"], "mass_activation_plot.pdf"))
plotly.offline.plot(fig, filename = os.path.join(data_dict["visualize"]["plot_directory"], "freq_activation_plot.pdf"),
auto_open=False)
fig.show()
| 5,351,171 |
def generate_json(args, df, num_changes, start_dt, finish_dt,
projects, projects_map,
not_found_proj, group=None, groups=[]):
"""
Returns json report from a dataframe for a specific project
"""
log.debug('Generating %s report for %s', args.report_format, group)
log.debug(projects)
if group:
# we want to report on the projects that are common to projects and df
projects_to_report = list(set(projects).intersection(df))
else:
projects_to_report = projects
frames = list()
for project in projects_to_report:
log.debug('%s df:\n%s', project, df[project])
frames.append(df[project])
# TODO wrap this in proper html or a template
if len(frames) <= 0:
return 'No projects in this group'
df_plot = generate_plot(args, df, frames, start_dt)
return df_plot.to_json(orient='table')
| 5,351,172 |
def b32_ntop(*args):
"""LDNS buffer."""
return _ldns.b32_ntop(*args)
| 5,351,173 |
def get_logger(filename, logger_name=None):
"""set logging file and format
Args:
filename: str, full path of the logger file to write
logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'
Return:
logger: python logger
"""
log_format = "%(asctime)s %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt="%m%d %I:%M:%S %p")
# different name is needed when creating multiple logger in one process
logger = logging.getLogger(logger_name)
    fh = logging.FileHandler(filename)
fh.setFormatter(logging.Formatter(log_format))
logger.addHandler(fh)
return logger
| 5,351,174 |
def infer_feature_extraction_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for feature extraction task
:param model: Pytorch model (sentence-transformers)
    :param run_on_cuda: True if inference should be run on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer
| 5,351,175 |
def editRole(userSource, oldName, newName):
"""Renames a role in the specified user source.
When altering the Gateway System User Source, the Allow User Admin
setting must be enabled.
Args:
userSource (str): The user source in which the role is found.
Blank will use the default user source.
oldName (str): The role to edit. Role must not be blank and must
exist.
newName (str): The new name for the role. Must not be blank.
Returns:
UIResponse: An object with lists of warnings, errors, and info
about the success or failure of the edit.
"""
print(userSource, oldName, newName)
return UIResponse(Locale.ENGLISH)
| 5,351,176 |
def flatsSingle(processes=6):
"""
Generates normalised flats at several wavelengths. Use all input files.
"""
#search for the right files
files = findFiles()
#generate flats using multiprocessing
pool = Pool(processes=processes)
pool.map(generateFlatsSingle, [(key, files[key]) for key in files.keys()])
| 5,351,177 |
def compute_bspline_dot_product_derivatives(basis_features, basis_dimension):
"""
Compute dot products of B-splines and their derivatives.
Input:
- basis_features: dict
Contain information on the basis for each state
- basis_dimension: dict
Give the number of basis functions for each state
Outputs:
- dot_product_12: ndarray
        Array containing the dot products of the B-splines
        with their derivatives
    - dot_product_22: ndarray
        Array containing the dot products of the B-spline
        derivatives
"""
# Compute the dimension of the problem
dimension = np.sum([basis_dimension[elt] for elt in basis_dimension])
# Get the knots
t = basis_features['knots']
# FIXME: Consider small parameter to avoid vanishing of the last B-spline
# at 1
eps = 1e-16
dot_product_12 = np.zeros([dimension, dimension])
dot_product_22 = np.zeros([dimension, dimension])
i, j = 0, 0
# Loop over states
for state1 in basis_dimension:
# Get degree of the B-splines of state1
k1 = basis_features[state1]
# Add external knots depending on the degree
t1 = np.r_[(0,)*(k1+1), t, (1,)*(k1+1)]
for state2 in basis_dimension:
# Get degree of the B-splines of state2
k2 = basis_features[state2]
# Add external knots depending on the degree
t2 = np.r_[(0,)*(k2+1), t, (1,)*(k2+1)]
for m in range(basis_dimension[state1]):
# Define m-th B-spline of the state1 basis
spl_m = BSpline.basis_element(t1[m:m+k1+2])
                # Reproduce the same spline for differentiation because of
                # differentiation problems with BSpline.basis_element()
# FIXME: simplify if possible
# Construct knots by first finding the internal knots and then
# by adding the right numbers of external knots
t1m = t1[m:m+k1+2]
ind_min1 = np.max(np.argwhere(t1m == t1[m]))
ind_max1 = np.min(np.argwhere(t1m == t1[m+k1+1]))
t_m = np.r_[(t1m[ind_min1],)*k1,
t1m[ind_min1:ind_max1+1],
(t1m[ind_max1],)*k1]
x_m = np.linspace(t1m[0], t1m[-1]-eps, 50)
spl_m = make_lsq_spline(x_m, spl_m(x_m), t_m, k1)
# Compute derivative
spl_m_deriv = spl_m.derivative(nu=1)
for n in range(basis_dimension[state2]):
# Define n-th B-spline of the state2 basis
spl_n = BSpline.basis_element(t2[n:n+k2+2])
# FIXME: simplify if possible
# Construct knots by first finding the internal knots and
# then by adding the right numbers of external knots
t2n = t2[n:n+k2+2]
ind_min2 = np.max(np.argwhere(t2n == t2[n]))
ind_max2 = np.min(np.argwhere(t2n == t2[n+k2+1]))
t_n = np.r_[(t2n[ind_min2],)*k2,
t2n[ind_min2:ind_max2+1],
(t2n[ind_max2],)*k2]
x_n = np.linspace(t2n[0], t2n[-1]-eps, 50)
spl_n = make_lsq_spline(x_n, spl_n(x_n), t_n, k2)
# Compute derivative
spl_n_deriv = spl_n.derivative(nu=1)
max_t = max(t1[m], t2[n])
min_t = min(t1[m+k1+1], t2[n+k2+1])
# If intersection of supports then do computations
if max_t < min_t:
# Numerical integration
quad_int_12 = quad(lambda x:
spl_m(x) * spl_n_deriv(x),
max_t, min_t)
quad_int_22 = quad(lambda x:
spl_m_deriv(x) * spl_n_deriv(x),
max_t, min_t)
dot_product_12[i + m, j + n] += quad_int_12[0]
dot_product_22[i + m, j + n] += quad_int_22[0]
j += basis_dimension[state2]
j = 0
i += basis_dimension[state1]
return dot_product_12, dot_product_22
| 5,351,178 |
def create(the_model, extend_lengths, Emod=1.0, nu=0.3, thickness=1.e-9):
"""Create a dummy region by extending the rail at each side. Assign
it a membrane section with parameters, thickness 0.01, Emod, and nu.
.. note:: Requires that the meshed part,
the_model.parts[names.rail_part] contains a surface named
`names.rail_contact_surf`
:param the_model: The model containing the rail part
:type rail_part: Model object (Abaqus)
    :param extend_lengths: The (absolute) distances by which the rail will be extended at each end
`[z=0, z=L]`. If any is None, the full contact surface will be extended.
:type extend_lengths: list[ float ], len=2
:param Emod: Dummy stiffness - elastic modulus of shadow membrane
:type Emod: float
:param nu: Dummy Poisson's ratio of shadow membrane
:type nu: float
:param thickness: Thickness of shadow membrane
:type thickness: float
:returns: None
:rtype: None
"""
rail_part = the_model.parts[names.rail_part]
# Create shadow section
the_model.Material(name='RailDummyElastic')
the_model.materials['RailDummyElastic'].Elastic(table=((Emod, nu), ))
the_model.MembraneSection(name=names.rail_shadow_sect, material='RailDummyElastic',
thickness=thickness)
contact_surface = rail_part.surfaces[names.rail_contact_surf]
contact_nodes = contact_surface.nodes
cs_bounding_box = contact_nodes.getBoundingBox()
rail_length = cs_bounding_box['high'][2] - cs_bounding_box['low'][2]
create_mesh(rail_part, contact_surface, z_shift=rail_length,
shadow_size=extend_lengths[0], set_name=names.rail_shadow_sets[0])
create_mesh(rail_part, contact_surface, z_shift=-rail_length,
shadow_size=extend_lengths[1], set_name=names.rail_shadow_sets[1])
add_membrane_elements(rail_part, contact_surface, set_name=names.rail_shadow_sets[2])
shadow_region = rail_part.SetByBoolean(name=names.rail_shadow_set,
sets=tuple([rail_part.sets[name]
for name in names.rail_shadow_sets]))
# Set element type to membrane elements
# Determine the element order by checking how many nodes in each
# element
num_element_nodes = []
for elem in shadow_region.elements:
num_el_nodes = len(elem.connectivity)
if num_el_nodes not in num_element_nodes:
num_element_nodes.append(num_el_nodes)
if (all([n in num_element_nodes for n in [3, 6]])
or all([n in num_element_nodes for n in [4, 6]])):
raise NotImplementedError('Mixed linear and quadratic elements '
+ 'in contact region not implemented')
elif any([n in num_element_nodes for n in [3, 4]]):
et_TRI = mesh.ElemType(elemCode=M3D3, elemLibrary=STANDARD, secondOrderAccuracy=OFF)
et_QUAD = mesh.ElemType(elemCode=M3D4, elemLibrary=STANDARD, secondOrderAccuracy=OFF)
else:
et_TRI = mesh.ElemType(elemCode=M3D6, elemLibrary=STANDARD, secondOrderAccuracy=ON)
et_QUAD = mesh.ElemType(elemCode=M3D8, elemLibrary=STANDARD, secondOrderAccuracy=ON)
membrane_elem_types = (et_TRI, et_QUAD)
rail_part.setElementType(regions=(shadow_region.elements, ), elemTypes=membrane_elem_types)
rail_part.SectionAssignment(region=shadow_region, sectionName=names.rail_shadow_sect)
# Create the rail contact node set
rail_part.Set(name=names.rail_contact_nodes, nodes=contact_nodes)
# Create a surface with all shadow elements and for the full contact
# surface
# Via testing side1Elements seem to be correct. Might be possible to
# verify this by checking for element face normals, should do so if
# it becomes a problem.
contact_surf_elems = {'side1Elements': shadow_region.elements}
#for face in contact_surface.faces:
# contact_surf_elems = mt.get_elem_by_face_type(face, elems=contact_surf_elems)
rail_part.Surface(name=names.rail_full_contact_surf, **contact_surf_elems)
# Cannot use the following boolean operation because that resulted
# in illegal double definition of the full contact surface in the
# input file:
#shadow_surface = rail_part.Surface(name=names.rail_shadow_surf,
# side1Elements=shadow_region.elements)
#rail_part.SurfaceByBoolean(name=names.rail_full_contact_surf,
# surfaces=(contact_surface, shadow_surface))
| 5,351,179 |
def test_regex_ldr_mtd2():
"""TEST REGEX_LDR_MTD2."""
print(REGEX_LDR_MTD2.match("LDR R3, [R4,R3]"), "Should pass mtd2")
| 5,351,180 |
def buffer_to_file(filename, data):
"""
    Expects two strings: filename and data; the data is appended to the file.
"""
    with open(filename, "a") as file:
        file.write(data)
| 5,351,181 |
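A trivial usage sketch for `buffer_to_file` above; the file name is hypothetical and each call appends to it.
buffer_to_file('capture.log', 'first chunk\n')
buffer_to_file('capture.log', 'second chunk\n')  # 'capture.log' now contains both chunks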
def validate_table(config, table):
"""Run VALVE validation on a table.
:param config: valve config dictionary
:param table: path to table
:return: list of errors
"""
errors = []
table_name = os.path.splitext(os.path.basename(table))[0]
table_details = config["table_details"]
fields = config["table_fields"].get(table, {})
fields.update(config["table_fields"].get("*", {}))
rules = None
if "table_rules" in config:
rules = config["table_rules"].get(table, {})
rules.update(config.get("*", {}))
row_idx = 0
for row in table_details[table_name]["rows"]:
col_idx = 1
for field, value in row.items():
if not value:
value = ""
# Check for field type
if field in fields:
# Get the expected field type
# This will be validated based on the given datatypes
parsed_type = fields[field]["parsed"]
error_message = fields[field]["message"]
# all values in this field must match the type
messages = validate_condition(
config, parsed_type, table_name, field, row_idx, value, message=error_message
)
if messages:
field_id = fields[field]["field ID"]
for m in messages:
m.update({
"rule ID": "field:" + str(field_id),
"rule": fields[field]["column"],
"level": "ERROR",
})
errors.append(m)
# Check for rules
if rules and field in rules:
# Check if the value meets any of the conditions
for rule in rules[field]:
when_condition = rule["when_condition"]
# Run meets_condition without logging
# as the then-cond check is only run if the value matches the type
messages = validate_condition(
config, when_condition, table_name, field, row_idx, value
)
if not messages:
# The "when" value meets the condition - validate the "then" value
then_column = rule["column"]
# Retrieve the "then" value to check if it meets the "then condition"
then_value = row[then_column]
if not then_value:
then_value = ""
messages = validate_condition(
config,
rule["then_condition"],
table_name,
then_column,
row_idx,
then_value,
message=rule["message"],
)
if messages:
for m in messages:
if rule["message"]:
msg = m["message"]
else:
msg = (
f"because '{value}' is '{parsed_to_str(config, when_condition)}', "
+ m["message"]
)
m.update(
{
"rule ID": "rule:" + str(rule["rule ID"]),
"rule": then_column,
"level": rule["level"],
"message": msg,
}
)
errors.append(m)
col_idx += 1
row_idx += 1
return errors
| 5,351,182 |
def beam_hardening_correction(mat, q, n, opt=True):
"""
Correct the grayscale values of a normalized image using a non-linear
function.
Parameters
----------
mat : array_like
Normalized projection image or sinogram image.
q : float
Positive number. Recommended range [0.005, 50].
n : float
        Positive number. Must be greater than or equal to 2.0.
opt : bool
True: Curve towards 0.0.
False: Curve towards 1.0.
Returns
-------
array_like
Corrected image.
"""
if np.max(mat) >= 2.0:
raise ValueError("!!! Input image must be normalized, i.e. gray-scales "
"are in the range of [0.0, 1.0]) !!!")
if n < 2.0:
raise ValueError("!!! n must be larger than or equal to 2 !!!")
return np.asarray([non_linear_function(x, q, n, opt) for x in mat])
| 5,351,183 |
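A hedged call sketch for `beam_hardening_correction` above, assuming `numpy` is imported as `np` and the module's `non_linear_function` helper is available; no output values are asserted because they depend on that helper.
sinogram = np.random.rand(4, 8)  # normalized values in [0.0, 1.0)
corrected = beam_hardening_correction(sinogram, q=0.005, n=2.0, opt=True)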
def cargar_recursos_vectores_transpuestos():
"""
    Load the information needed to compute the transposed vectors.
"""
    # Create the dataframe
filename = 'csv/' + conf.data['env']['path'] + '/vectores_transpuestos.csv'
recursos_v_transpuestos = pd.read_csv(filename)
    # Rename the indexes to the names of the transposed vectors
    # Create a MultiIndex to avoid adding a column to the transposed vectors
    # it is defined by tuples = [('Reg.1','H'), ('Reg.2', 'F'),('Reg.3', 'G'),('Reg.4', 'H'),...,('Reg.10', 'A')]
tuples = [('Reg.' + str(i), vt) for i, vt in zip(recursos_v_transpuestos.pop('num_de_region'), recursos_v_transpuestos.pop('chr_vector_t'))]
recursos_v_transpuestos.index = pd.MultiIndex.from_tuples(tuples)
return recursos_v_transpuestos
| 5,351,184 |
def eric_authors_and_editors():
"""Check «Автор(ы), редакторы и рецензенты (если есть) материалов источника(ов):»."""
yield from eric_head(
'Автор(ы), редакторы и рецензенты (если есть) материалов источника(ов):')
| 5,351,185 |
def read_pdb(file_name, exclude=('SOL',), ignh=False, modelidx=1):
"""
Parse a PDB file to create a molecule.
Parameters
----------
    file_name: str
The file to read.
exclude: collections.abc.Container[str]
Atoms that have one of these residue names will not be included.
ignh: bool
Whether hydrogen atoms should be ignored.
    modelidx: int
If the PDB file contains multiple models, which one to select.
Returns
-------
list[vermouth.molecule.Molecule]
The parsed molecules. Will only contain edges if the PDB file has
CONECT records. Either way, the molecules might be disconnected. Entries
separated by TER, ENDMDL, and END records will result in separate
molecules.
"""
parser = PDBParser(exclude, ignh, modelidx)
with open(str(file_name)) as file_handle:
mols = list(parser.parse(file_handle))
LOGGER.info('Read {} molecules from PDB file {}', len(mols), file_name)
return mols
| 5,351,186 |
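A minimal usage sketch for `read_pdb` above; the file path is hypothetical.
molecules = read_pdb('protein.pdb', exclude=('SOL', 'HOH'), ignh=True, modelidx=1)
print(len(molecules), 'molecule(s) parsed')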
def sao_isomorficas(texto1: str, texto2: str) -> bool:
"""
>>> sao_isomorficas('egg', 'add')
True
>>> sao_isomorficas('foo', 'bar')
False
>>> sao_isomorficas('eggs', 'add')
False
"""
    # O(n) algorithm in time and memory
letras_encontradas = {}
if len(texto1) != len(texto2):
return False
for caractere_1, caractere_2 in zip(texto1, texto2):
try:
letra = letras_encontradas[caractere_1]
except KeyError:
letras_encontradas[caractere_1] = caractere_2
else:
            if letra != caractere_2:
                return False
return True
| 5,351,187 |
def _is_no_args(fn):
"""Check if function has no arguments.
"""
return getargspec(fn).args == []
| 5,351,188 |
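Doctest-style checks for `_is_no_args` above, assuming `getargspec` is imported from `inspect`; only positional argument names are inspected.
>>> _is_no_args(lambda: 1)
True
>>> _is_no_args(lambda x, y=2: x + y)
False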
def failure(parsed_args):
"""
:param :py:class:`argparse.Namespace` parsed_args:
:return: Nowcast system message type
:rtype: str
"""
logger.critical(
f"{parsed_args.model_config} {parsed_args.run_type} FVCOM VH-FR run for "
f'{parsed_args.run_date.format("YYYY-MM-DD")} '
f"on {parsed_args.host_name} failed"
)
msg_type = f"failure {parsed_args.model_config} {parsed_args.run_type}"
return msg_type
| 5,351,189 |
def headers_url_generator(resp, fuzzable_req):
"""
Yields tuples containing:
* Newly found URL
* The FuzzableRequest instance passed as parameter
* The HTTPResponse generated by the FuzzableRequest
* Boolean indicating if we trust this reference or not
The newly found URLs are extracted from the http response headers such
as "Location".
:param resp: HTTP response object
:param fuzzable_req: The HTTP request that generated the response
"""
resp_headers = resp.get_headers()
for parser, header_names in URL_HEADERS.iteritems():
for header_name in header_names:
header_value, _ = resp_headers.iget(header_name, None)
if header_value is not None:
header_value = smart_unicode(header_value,
encoding=resp.charset)
for ref in parser(resp, header_name, header_value):
yield ref, fuzzable_req, resp, False
| 5,351,190 |
def good_AP_finder(time,voltage):
"""
This function takes the following input:
time - vector where each element is a time in seconds
voltage - vector where each element is a voltage at a different time
    We are assuming that the two vectors are in correspondence (meaning
that at a given index, the time in one corresponds to the voltage in
the other). The vectors must be the same size or the code
won't run
This function returns the following output:
APTimes - all the times where a spike (action potential) was detected
"""
APTimes = []
#Let's make sure the input looks at least reasonable
if (len(voltage) != len(time)):
print "Can't run - the vectors aren't the same length!"
return APTimes
##Your Code Here!
    threshold = 0.5 * np.max(voltage)
    times_of_APs = time[voltage > threshold]
    APTimes = times_of_APs[np.diff(times_of_APs) > 0.0015]
return APTimes
| 5,351,191 |
def parse(excel_sheets: Dict[Any, pd.DataFrame],
dictionary: Dict[str, Any],
verbose: bool = False) -> pd.DataFrame:
"""Parse sheets of an excel file according to instructions in `dictionary`.
"""
redux_dict = recursive_traverse(dictionary)
column_tuples = redux_dict.keys()
    tuple_lengths = [len(column_tuple) for column_tuple in column_tuples]
if len(set(tuple_lengths)) > 1:
raise ValueError("Depth of provided JSON file is inconsistent. All "
"entries must be located at the same depth.")
multi_index = pd.MultiIndex.from_tuples(tuples=column_tuples)
data_frame = pd.DataFrame(columns=multi_index)
if verbose:
sheets = tqdm(
excel_sheets.items(),
desc="Looping through sheets",
ncols=100
)
else:
sheets = excel_sheets.items()
for sheet_name, sheet in sheets:
new_row = {}
for column, instr in redux_dict.items():
try:
raw = sheet.iloc[instr["row"], instr["col"]].values
except AttributeError:
raw = sheet.iloc[instr["row"], instr["col"]]
except ValueError:
raw = None
try:
func = map_with_dict(instr["choices"])
except KeyError:
func = FUNC_DICT[instr["func"]]
try:
new_row[column] = func(raw)
except:
new_row[column] = None
data_frame = data_frame.append(new_row, ignore_index=True)
return data_frame
| 5,351,192 |
def build_LAMP(prob,T,shrink,untied):
"""
Builds a LAMP network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
eta,theta_init = shrinkage.get_shrinkage_function(shrink)
print('theta_init='+repr(theta_init))
layers=[]
A = prob.A
M,N = A.shape
B = A.T / (1.01 * la.norm(A,2)**2)
B_ = tf.Variable(B,dtype=tf.float32,name='B_0')
By_ = tf.matmul( B_ , prob.y_ )
layers.append( ('Linear',By_,None) )
if getattr(prob,'iid',True) == False:
# set up individual parameters for every coordinate
theta_init = theta_init*np.ones( (N,1),dtype=np.float32 )
theta_ = tf.Variable(theta_init,dtype=tf.float32,name='theta_0')
OneOverM = tf.constant(float(1)/M,dtype=tf.float32)
NOverM = tf.constant(float(N)/M,dtype=tf.float32)
rvar_ = tf.reduce_sum(tf.square(prob.y_),0) * OneOverM
(xhat_,dxdr_) = eta( By_,rvar_ , theta_ )
layers.append( ('LAMP-{0} T=1'.format(shrink),xhat_,(theta_,) ) )
vt_ = prob.y_
for t in range(1,T):
if len(dxdr_.get_shape())==2:
dxdr_ = tf.reduce_mean(dxdr_,axis=0)
bt_ = dxdr_ * NOverM
vt_ = prob.y_ - tf.matmul( prob.A_ , xhat_ ) + bt_ * vt_
rvar_ = tf.reduce_sum(tf.square(vt_),0) * OneOverM
theta_ = tf.Variable(theta_init,name='theta_'+str(t))
if untied:
B_ = tf.Variable(B,dtype=tf.float32,name='B_'+str(t))
rhat_ = xhat_ + tf.matmul(B_,vt_)
layers.append( ('LAMP-{0} linear T={1}'.format(shrink,t+1),rhat_ ,(B_,) ) )
else:
rhat_ = xhat_ + tf.matmul(B_,vt_)
(xhat_,dxdr_) = eta( rhat_ ,rvar_ , theta_ )
layers.append( ('LAMP-{0} non-linear T={1}'.format(shrink,t+1),xhat_,(theta_,) ) )
return layers
| 5,351,193 |
def coeffVar(X, precision=3):
"""
Coefficient of variation of the given data (population)
Argument:
X: data points, a list of int, do not mix negative and positive numbers
precision (optional): digits precision after the comma, default=3
Returns:
float, the cv (measure of dispersion) of the input sample
or raise StatsError('mean is zero') if the mean = 0
"""
try:
return round(stdDev(X, precision) / mean(X, precision), precision)
except ZeroDivisionError:
raise StatsError('mean is zero')
| 5,351,194 |
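A doctest-style sketch for `coeffVar` above, assuming the module's `stdDev` and `mean` helpers compute the population standard deviation and the arithmetic mean.
>>> coeffVar([2, 4, 4, 4, 5, 5, 7, 9])
0.4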
def tostring(node):
"""
Generates a string representation of the tree, in a format determined by the user.
@ In, node, InputNode or InputTree, item to turn into a string
@ Out, tostring, string, full tree in string form
"""
if isinstance(node,InputNode) or isinstance(node,InputTree):
return node.printXML()
else:
raise NotImplementedError('TreeStructure.tostring received "'+str(node)+'" but was expecting InputNode or InputTree.')
| 5,351,195 |
def pick_char_from_dict(char: str, dictionary: Dict[str, str]) -> str:
"""
Picks a random format for the givin letter in the dictionary
"""
return random.choice(dictionary[char])
| 5,351,196 |
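A small usage sketch for `pick_char_from_dict` above, assuming `random` and `typing.Dict` are imported as the function requires; the variant dictionary is illustrative.
variants = {'a': 'aA@4', 'b': 'bB8'}
print(pick_char_from_dict('a', variants))  # one of 'a', 'A', '@', '4', chosen at random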
def bmeow_to_bilou(tags: List[str]) -> List[str]:
"""Convert BMEOW tags to the BILOU format.
Args:
tags: The BMEOW tags we are converting
Raises:
ValueError: If there were errors in the BMEOW formatting of the input.
Returns:
Tags that produce the same spans in the BILOU format.
"""
return convert_tags(tags, parse_spans_bmeow_with_errors, write_bilou_tags)
| 5,351,197 |
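A hedged example of the conversion performed by `bmeow_to_bilou` above, assuming the usual BMEOW-to-BILOU tag correspondence (B→B, M→I, E→L, W→U, O→O) implemented by the helpers it delegates to.
>>> bmeow_to_bilou(['B-PER', 'M-PER', 'E-PER', 'O', 'W-LOC'])
['B-PER', 'I-PER', 'L-PER', 'O', 'U-LOC']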
def site_sold_per_category(items):
"""For every category, a (site, count) pair with the number of items sold by the
site in that category.
"""
return [(site,
[(cat, total_sold(cat_items)) for cat, cat_items in
categories])
for site, categories in
category_items_per_site(items).iteritems()]
| 5,351,198 |
def assert_(val: numpy.bool_):
"""
usage.scipy: 503
usage.skimage: 58
"""
...
| 5,351,199 |